Dataset schema (five fields per row; value ranges as reported by the viewer):

    field                     type     range
    ------------------------  -------  ------------------
    code                      string   87 - 55.2k chars
    code_codestyle            int64    0 - 349
    style_context             string   135 - 49.1k chars
    style_context_codestyle   int64    0 - 349
    label                     int64    0 - 1
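Rows with this schema can be read with the `datasets` library. A minimal sketch, assuming a hypothetical repository id "user/code-style-pairs" (the real id is not given in this dump):

    # Minimal sketch for loading rows with the schema above.
    # "user/code-style-pairs" is a hypothetical repository id; substitute the real one.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")
    row = ds[0]
    # Each row pairs a source file ("code") with a context file ("style_context"),
    # plus integer style ids (0-349) and a binary label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:200])

The rows below follow this layout: a `code` cell, its `code_codestyle` id, a `style_context` cell, its `style_context_codestyle` id, and the `label`.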
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class a : def __init__( self : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = data _UpperCAmelCase = [0x67_45_23_01, 0xef_cd_ab_89, 0x98_ba_dc_fe, 0x10_32_54_76, 0xc3_d2_e1_f0] @staticmethod def lowerCAmelCase_ ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ): return ((n << b) | (n >> (32 - b))) & 0xff_ff_ff_ff def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64) _UpperCAmelCase = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def lowerCAmelCase_ ( self : Union[str, Any] ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Dict ): _UpperCAmelCase = list(struct.unpack(""">16L""" , __lowerCAmelCase ) ) + [0] * 64 for i in range(16 , 80 ): _UpperCAmelCase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.padding() _UpperCAmelCase = self.split_blocks() for block in self.blocks: _UpperCAmelCase = self.expand_block(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.h for i in range(0 , 80 ): if 0 <= i < 20: _UpperCAmelCase = (b & c) | ((~b) & d) _UpperCAmelCase = 0x5a_82_79_99 elif 20 <= i < 40: _UpperCAmelCase = b ^ c ^ d _UpperCAmelCase = 0x6e_d9_eb_a1 elif 40 <= i < 60: _UpperCAmelCase = (b & c) | (b & d) | (c & d) _UpperCAmelCase = 0x8f_1b_bc_dc elif 60 <= i < 80: _UpperCAmelCase = b ^ c ^ d _UpperCAmelCase = 0xca_62_c1_d6 _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = ( self.rotate(__lowerCAmelCase , 5 ) + f + e + k + expanded_block[i] & 0xff_ff_ff_ff, a, self.rotate(__lowerCAmelCase , 30 ), c, d, ) _UpperCAmelCase = ( self.h[0] + a & 0xff_ff_ff_ff, self.h[1] + b & 0xff_ff_ff_ff, self.h[2] + c & 0xff_ff_ff_ff, self.h[3] + d & 0xff_ff_ff_ff, self.h[4] + e & 0xff_ff_ff_ff, ) return ("{:08x}" * 5).format(*self.h ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = B"""Test String""" assert SHAaHash(lowercase ).final_hash() == hashlib.shaa(lowercase ).hexdigest() # noqa: S324 def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,) parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file ,"""rb""" ) as f: _UpperCAmelCase = f.read() else: _UpperCAmelCase = bytes(lowercase ,"""utf-8""" ) print(SHAaHash(lowercase ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
289
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
289
1
"""simple docstring""" import numpy as np def __UpperCAmelCase ( lowercase ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
289
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : str = ['input_features', 'attention_mask'] def __init__( self : Tuple , __lowerCAmelCase : Any=80 , __lowerCAmelCase : Union[str, Any]=1_6000 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Union[str, Any]=10 , __lowerCAmelCase : Optional[int]=25 , __lowerCAmelCase : Tuple="hamming_window" , __lowerCAmelCase : Tuple=32_768.0 , __lowerCAmelCase : List[Any]=0.97 , __lowerCAmelCase : List[Any]=1.0 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Optional[int]=False , **__lowerCAmelCase : List[str] , ): super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = feature_size _UpperCAmelCase = sampling_rate _UpperCAmelCase = padding_value _UpperCAmelCase = hop_length _UpperCAmelCase = win_length _UpperCAmelCase = frame_signal_scale _UpperCAmelCase = preemphasis_coeff _UpperCAmelCase = mel_floor _UpperCAmelCase = normalize_means _UpperCAmelCase = normalize_vars _UpperCAmelCase = win_function _UpperCAmelCase = return_attention_mask _UpperCAmelCase = win_length * sampling_rate // 1000 _UpperCAmelCase = hop_length * sampling_rate // 1000 _UpperCAmelCase = optimal_fft_length(self.sample_size ) _UpperCAmelCase = (self.n_fft // 2) + 1 def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : np.array ): if self.win_function == "hamming_window": _UpperCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=__lowerCAmelCase ) else: _UpperCAmelCase = window_function(window_length=self.sample_size , name=self.win_function ) _UpperCAmelCase = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) _UpperCAmelCase = spectrogram( one_waveform * self.frame_signal_scale , window=__lowerCAmelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__lowerCAmelCase , preemphasis=self.preemphasis_coeff , mel_filters=__lowerCAmelCase , mel_floor=self.mel_floor , log_mel="""log""" , ) return msfc_features.T def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ): # make sure we normalize float32 arrays if self.normalize_means: _UpperCAmelCase = x[:input_length].mean(axis=0 ) _UpperCAmelCase = np.subtract(__lowerCAmelCase , __lowerCAmelCase ) if self.normalize_vars: _UpperCAmelCase = x[:input_length].std(axis=0 ) _UpperCAmelCase = np.divide(__lowerCAmelCase , __lowerCAmelCase ) if input_length < x.shape[0]: _UpperCAmelCase = padding_value # make sure array is in float32 _UpperCAmelCase = x.astype(np.floataa ) return x def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : Optional[np.ndarray] = None ): _UpperCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(__lowerCAmelCase , __lowerCAmelCase 
, self.padding_value ) for x, n in zip(__lowerCAmelCase , __lowerCAmelCase )] def __call__( self : Dict , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Optional[int] , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) _UpperCAmelCase = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _UpperCAmelCase = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _UpperCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): _UpperCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _UpperCAmelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _UpperCAmelCase = [raw_speech] # extract fbank features _UpperCAmelCase = [self._extract_mfsc_features(__lowerCAmelCase ) for one_waveform in raw_speech] # convert into correct format for padding _UpperCAmelCase = BatchFeature({"""input_features""": features} ) _UpperCAmelCase = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) # make sure list is in array format _UpperCAmelCase = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __lowerCAmelCase ): _UpperCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features] _UpperCAmelCase = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: _UpperCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: _UpperCAmelCase = ( np.array(__lowerCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) _UpperCAmelCase = self.normalize( padded_inputs["""input_features"""] , attention_mask=__lowerCAmelCase ) if return_tensors is not None: _UpperCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase ) return padded_inputs
289
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
289
1
"""simple docstring""" import numpy as np def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = int(np.ceil((x_end - xa) / h ) ) _UpperCAmelCase = np.zeros((n + 1,) ) _UpperCAmelCase = ya _UpperCAmelCase = xa for k in range(lowercase ): _UpperCAmelCase = f(lowercase ,y[k] ) _UpperCAmelCase = f(x + 0.5 * h ,y[k] + 0.5 * h * ka ) _UpperCAmelCase = f(x + 0.5 * h ,y[k] + 0.5 * h * ka ) _UpperCAmelCase = f(x + h ,y[k] + h * ka ) _UpperCAmelCase = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
289
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
1
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase__ = logging.getLogger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # save results if os.path.exists(lowercase ): if os.path.exists(os.path.join(lowercase ,"""config.json""" ) ) and os.path.isfile( os.path.join(lowercase ,"""config.json""" ) ): os.remove(os.path.join(lowercase ,"""config.json""" ) ) if os.path.exists(os.path.join(lowercase ,"""pytorch_model.bin""" ) ) and os.path.isfile( os.path.join(lowercase ,"""pytorch_model.bin""" ) ): os.remove(os.path.join(lowercase ,"""pytorch_model.bin""" ) ) else: os.makedirs(lowercase ) model.save_pretrained(lowercase ) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = 2 if unlogit: _UpperCAmelCase = torch.pow(lowercase ,lowercase ) _UpperCAmelCase = p * torch.log(lowercase ) _UpperCAmelCase = 0 return -plogp.sum(dim=-1 ) def __UpperCAmelCase ( lowercase ): """simple docstring""" logger.info("""lv, h >\t""" + """\t""".join(f'''{x + 1}''' for x in range(len(lowercase ) ) ) ) for row in range(len(lowercase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + """\t""".join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + """\t""".join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=True ,lowercase=True ,lowercase=None ,lowercase=False ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = model.config.num_hidden_layers, model.config.num_attention_heads _UpperCAmelCase = torch.zeros(lowercase ,lowercase ).to(args.device ) _UpperCAmelCase = torch.zeros(lowercase ,lowercase ).to(args.device ) if head_mask is None: _UpperCAmelCase = torch.ones(lowercase ,lowercase ).to(args.device ) head_mask.requires_grad_(requires_grad=lowercase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _UpperCAmelCase = None _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for step, inputs in enumerate(tqdm(lowercase ,desc="""Iteration""" ,disable=args.local_rank not in [-1, 0] ) ): _UpperCAmelCase = tuple(t.to(args.device ) for t in inputs ) ((_UpperCAmelCase) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _UpperCAmelCase = model(lowercase ,labels=lowercase ,head_mask=lowercase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowercase ): _UpperCAmelCase = entropy(attn.detach() ,lowercase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowercase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _UpperCAmelCase = 2 _UpperCAmelCase = torch.pow(torch.pow(lowercase 
,lowercase ).sum(-1 ) ,1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _UpperCAmelCase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("""Attention entropies""" ) print_ad_tensor(lowercase ) if compute_importance: logger.info("""Head importance scores""" ) print_ad_tensor(lowercase ) logger.info("""Head ranked by importance scores""" ) _UpperCAmelCase = torch.zeros(head_importance.numel() ,dtype=torch.long ,device=args.device ) _UpperCAmelCase = torch.arange( head_importance.numel() ,device=args.device ) _UpperCAmelCase = head_ranks.view_as(lowercase ) print_ad_tensor(lowercase ) return attn_entropy, head_importance, total_loss def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = compute_heads_importance(lowercase ,lowercase ,lowercase ,compute_entropy=lowercase ) _UpperCAmelCase = 1 / loss # instead of downsteam score use the LM loss logger.info("""Pruning: original score: %f, threshold: %f""" ,lowercase ,original_score * args.masking_threshold ) _UpperCAmelCase = torch.ones_like(lowercase ) _UpperCAmelCase = max(1 ,int(new_head_mask.numel() * args.masking_amount ) ) _UpperCAmelCase = original_score while current_score >= original_score * args.masking_threshold: _UpperCAmelCase = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = head_importance.view(-1 ).sort()[1] if len(lowercase ) <= num_to_mask: print("""BREAK BY num_to_mask""" ) break # mask heads _UpperCAmelCase = current_heads_to_mask[:num_to_mask] logger.info("""Heads to mask: %s""" ,str(current_heads_to_mask.tolist() ) ) _UpperCAmelCase = new_head_mask.view(-1 ) _UpperCAmelCase = 0.0 _UpperCAmelCase = new_head_mask.view_as(lowercase ) _UpperCAmelCase = new_head_mask.clone().detach() print_ad_tensor(lowercase ) # Compute metric and head importance again _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = compute_heads_importance( lowercase ,lowercase ,lowercase ,compute_entropy=lowercase ,head_mask=lowercase ) _UpperCAmelCase = 1 / loss logger.info( """Masking: current score: %f, remaining heads %d (%.1f percents)""" ,lowercase ,new_head_mask.sum() ,new_head_mask.sum() / new_head_mask.numel() * 1_00 ,) logger.info("""Final head mask""" ) print_ad_tensor(lowercase ) np.save(os.path.join(args.output_dir ,"""head_mask.npy""" ) ,head_mask.detach().cpu().numpy() ) return head_mask def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = datetime.now() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = compute_heads_importance( lowercase ,lowercase ,lowercase ,compute_entropy=lowercase ,compute_importance=lowercase ,head_mask=lowercase ) _UpperCAmelCase = 1 / loss _UpperCAmelCase = datetime.now() - before_time _UpperCAmelCase = sum(p.numel() for p in model.parameters() ) _UpperCAmelCase = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowercase ) ) } for k, v in heads_to_prune.items(): if isinstance(lowercase ,lowercase ): _UpperCAmelCase = [ v, ] assert sum(len(lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowercase ) _UpperCAmelCase = sum(p.numel() for p in model.parameters() ) _UpperCAmelCase = datetime.now() 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = compute_heads_importance( lowercase ,lowercase ,lowercase ,compute_entropy=lowercase ,compute_importance=lowercase ,head_mask=lowercase ,actually_pruned=lowercase ,) _UpperCAmelCase = 1 / loss _UpperCAmelCase = datetime.now() - before_time logger.info( """Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" ,lowercase ,lowercase ,pruned_num_params / original_num_params * 1_00 ,) logger.info("""Pruning: score with masking: %f score with pruning: %f""" ,lowercase ,lowercase ) logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" ,original_time / new_time * 1_00 ) save_model(lowercase ,args.output_dir ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--data_dir""" ,default=lowercase ,type=lowercase ,required=lowercase ,help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" ,) parser.add_argument( """--model_name_or_path""" ,default=lowercase ,type=lowercase ,required=lowercase ,help="""Path to pretrained model or model identifier from huggingface.co/models""" ,) parser.add_argument( """--output_dir""" ,default=lowercase ,type=lowercase ,required=lowercase ,help="""The output directory where the model predictions and checkpoints will be written.""" ,) # Other parameters parser.add_argument( """--config_name""" ,default="""""" ,type=lowercase ,help="""Pretrained config name or path if not the same as model_name_or_path""" ,) parser.add_argument( """--tokenizer_name""" ,default="""""" ,type=lowercase ,help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" ,) parser.add_argument( """--cache_dir""" ,default=lowercase ,type=lowercase ,help="""Where do you want to store the pre-trained models downloaded from s3""" ,) parser.add_argument( """--data_subset""" ,type=lowercase ,default=-1 ,help="""If > 0: limit the data to a subset of data_subset instances.""" ) parser.add_argument( """--overwrite_output_dir""" ,action="""store_true""" ,help="""Whether to overwrite data in output directory""" ) parser.add_argument( """--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" ) parser.add_argument( """--dont_normalize_importance_by_layer""" ,action="""store_true""" ,help="""Don't normalize importance score by layers""" ) parser.add_argument( """--dont_normalize_global_importance""" ,action="""store_true""" ,help="""Don't normalize all importance scores between 0 and 1""" ,) parser.add_argument( """--try_masking""" ,action="""store_true""" ,help="""Whether to try to mask head until a threshold of accuracy.""" ) parser.add_argument( """--masking_threshold""" ,default=0.9 ,type=lowercase ,help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" ,) parser.add_argument( """--masking_amount""" ,default=0.1 ,type=lowercase ,help="""Amount to heads to masking at each masking step.""" ) parser.add_argument("""--metric_name""" ,default="""acc""" ,type=lowercase ,help="""Metric to use for head masking.""" ) parser.add_argument( """--max_seq_length""" ,default=1_28 ,type=lowercase ,help=( """The maximum total input sequence length after WordPiece tokenization. 
\n""" """Sequences longer than this will be truncated, sequences shorter padded.""" ) ,) parser.add_argument("""--batch_size""" ,default=1 ,type=lowercase ,help="""Batch size.""" ) parser.add_argument("""--seed""" ,type=lowercase ,default=42 ) parser.add_argument("""--local_rank""" ,type=lowercase ,default=-1 ,help="""local_rank for distributed training on gpus""" ) parser.add_argument("""--no_cuda""" ,action="""store_true""" ,help="""Whether not to use CUDA when available""" ) parser.add_argument("""--server_ip""" ,type=lowercase ,default="""""" ,help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" ,type=lowercase ,default="""""" ,help="""Can be used for distant debugging.""" ) _UpperCAmelCase = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=lowercase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _UpperCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" ) _UpperCAmelCase = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _UpperCAmelCase = torch.device("""cuda""" ,args.local_rank ) _UpperCAmelCase = 1 torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device ,args.n_gpu ,bool(args.local_rank != -1 ) ) ) _UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _UpperCAmelCase = nn.parallel.DistributedDataParallel( lowercase ,device_ids=[args.local_rank] ,output_device=args.local_rank ,find_unused_parameters=lowercase ) elif args.n_gpu > 1: _UpperCAmelCase = nn.DataParallel(lowercase ) # Print/save training arguments os.makedirs(args.output_dir ,exist_ok=lowercase ) torch.save(lowercase ,os.path.join(args.output_dir ,"""run_args.bin""" ) ) logger.info("""Training/evaluation parameters %s""" ,lowercase ) # Prepare dataset _UpperCAmelCase = np.concatenate( [ np.loadtxt(args.data_dir ,dtype=np.intaa ), ] ) _UpperCAmelCase = (torch.from_numpy(lowercase ),) _UpperCAmelCase = TensorDataset(*lowercase ) _UpperCAmelCase = RandomSampler(lowercase ) _UpperCAmelCase = DataLoader(lowercase ,sampler=lowercase ,batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowercase ,lowercase ,lowercase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _UpperCAmelCase = mask_heads(lowercase ,lowercase ,lowercase ) prune_heads(lowercase ,lowercase ,lowercase ,lowercase ) if __name__ == "__main__": main()
289
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
289
1
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(""".""") def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( """`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """ f'''{test_file} instead.''' ) _UpperCAmelCase = components[-1] if not test_fn.endswith("""py""" ): raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith("""test_modeling_""" ): raise ValueError( f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) _UpperCAmelCase = components[:-1] + [test_fn.replace(""".py""" ,"""""" )] _UpperCAmelCase = """.""".join(lowercase ) return test_module_path def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = get_module_path(lowercase ) _UpperCAmelCase = importlib.import_module(lowercase ) return test_module def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = get_test_module(lowercase ) for attr in dir(lowercase ): if attr.endswith("""ModelTester""" ): tester_classes.append(getattr(lowercase ,lowercase ) ) # sort with class names return sorted(lowercase ,key=lambda lowercase : x.__name__ ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = get_test_module(lowercase ) for attr in dir(lowercase ): _UpperCAmelCase = getattr(lowercase ,lowercase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). _UpperCAmelCase = getattr(lowercase ,"""all_model_classes""" ,[] ) if len(lowercase ) > 0: test_classes.append(lowercase ) # sort with class names return sorted(lowercase ,key=lambda lowercase : x.__name__ ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = get_test_classes(lowercase ) _UpperCAmelCase = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowercase ,key=lambda lowercase : x.__name__ ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = test_class() if hasattr(lowercase ,"""setUp""" ): test.setUp() _UpperCAmelCase = None if hasattr(lowercase ,"""model_tester""" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: _UpperCAmelCase = test.model_tester.__class__ return model_tester def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_test_classes(lowercase ) _UpperCAmelCase = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowercase ) # sort with class names return sorted(lowercase ,key=lambda lowercase : x.__name__ ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_test_classes_for_model(lowercase ,lowercase ) _UpperCAmelCase = [] for test_class in test_classes: _UpperCAmelCase = get_model_tester_from_test_class(lowercase ) if tester_class is not None: tester_classes.append(lowercase ) # sort with class names return sorted(lowercase ,key=lambda lowercase : x.__name__ ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = get_test_classes(lowercase ) _UpperCAmelCase = {test_class: get_model_tester_from_test_class(lowercase ) for test_class in test_classes} return test_tester_mapping def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = get_model_classes(lowercase ) _UpperCAmelCase = { model_class: get_test_classes_for_model(lowercase ,lowercase ) for model_class in model_classes } return model_test_mapping def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = get_model_classes(lowercase ) _UpperCAmelCase = { model_class: get_tester_classes_for_model(lowercase ,lowercase ) for model_class in model_classes } return model_to_tester_mapping def __UpperCAmelCase ( lowercase ): """simple docstring""" if isinstance(lowercase ,lowercase ): return o elif isinstance(lowercase ,lowercase ): return o.__name__ elif isinstance(lowercase ,(list, tuple) ): return [to_json(lowercase ) for x in o] elif isinstance(lowercase ,lowercase ): return {to_json(lowercase ): to_json(lowercase ) for k, v in o.items()} else: return o
289
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
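# A minimal sketch (added) of how the dummy components and inputs above are wired
# together in these fast tests; it assumes diffusers is installed, and the helper name
# below is illustrative rather than taken from the file.
import torch
from diffusers import StableUnCLIPPipeline


def run_dummy_stable_unclip(components: dict):
    # `components` is the dict returned by get_dummy_components() above
    pipe = StableUnCLIPPipeline(**components)
    pipe.set_progress_bar_config(disable=True)
    generator = torch.Generator(device="cpu").manual_seed(0)
    output = pipe(
        "A painting of a squirrel eating a burger",
        generator=generator,
        num_inference_steps=2,
        prior_num_inference_steps=2,
        output_type="np",
    )
    return output.images[0]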
289
1
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" _UpperCAmelCase = len(lowercase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(lowercase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] ,[*diagonal_right_collisions, row - col] ,[*diagonal_left_collisions, row + col] ,lowercase ,lowercase ,) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] depth_first_search([] ,[] ,[] ,lowercase ,lowercase ) # Print all the boards for board in boards: for column in board: print(lowercase ) print("""""" ) print(len(lowercase ) ,"""solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
289
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
289
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Dict = 'open-llama' def __init__( self : int , __lowerCAmelCase : Any=10_0000 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : List[str]=1_1008 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : str="silu" , __lowerCAmelCase : List[Any]=2048 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[int]=1e-6 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[Any]=1 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Dict=True , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=None , **__lowerCAmelCase : Optional[Any] , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = rms_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = kwargs.pop( """use_memorry_efficient_attention""" , __lowerCAmelCase ) _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_dropout_prob _UpperCAmelCase = use_stable_embedding _UpperCAmelCase = shared_input_output_embedding _UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase , ) def lowerCAmelCase_ ( self : List[Any] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowerCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _UpperCAmelCase = self.rope_scaling.get("""type""" , __lowerCAmelCase ) _UpperCAmelCase = self.rope_scaling.get("""factor""" , __lowerCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
289
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = 'wav2vec2' def __init__( self : Tuple , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Tuple=768 , __lowerCAmelCase : Optional[int]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : Optional[Any]=1e-5 , __lowerCAmelCase : Tuple="group" , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : List[str]=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Dict=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=128 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Any=False , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=0.05 , __lowerCAmelCase : str=10 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Optional[int]=10 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : Optional[Any]=320 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=100 , __lowerCAmelCase : Optional[Any]=256 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Optional[int]="sum" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : int=(512, 512, 512, 512, 1500) , __lowerCAmelCase : Optional[int]=(5, 3, 3, 1, 1) , __lowerCAmelCase : List[str]=(1, 2, 3, 1, 1) , __lowerCAmelCase : Union[str, Any]=512 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict , ): super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_norm _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layerdrop _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = vocab_size _UpperCAmelCase = do_stable_layer_norm _UpperCAmelCase = use_weighted_layer_sum 
if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = apply_spec_augment _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length _UpperCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _UpperCAmelCase = num_codevectors_per_group _UpperCAmelCase = num_codevector_groups _UpperCAmelCase = contrastive_logits_temperature _UpperCAmelCase = feat_quantizer_dropout _UpperCAmelCase = num_negatives _UpperCAmelCase = codevector_dim _UpperCAmelCase = proj_codevector_dim _UpperCAmelCase = diversity_loss_weight # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # adapter _UpperCAmelCase = add_adapter _UpperCAmelCase = adapter_kernel_size _UpperCAmelCase = adapter_stride _UpperCAmelCase = num_adapter_layers _UpperCAmelCase = output_hidden_size or hidden_size _UpperCAmelCase = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _UpperCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = xvector_output_dim @property def lowerCAmelCase_ ( self : Dict ): return functools.reduce(operator.mul , self.conv_stride , 1 )
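# A short illustration (added): the closing property above (named
# `inputs_to_logits_ratio` in the transformers source) is the product of the
# convolutional strides, i.e. the feature extractor's total downsampling factor.
# With the default strides (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320, so 16 kHz
# audio yields one frame of hidden states every 20 ms.
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320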
289
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
289
1
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
289
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
289
1
"""simple docstring""" from __future__ import annotations from random import choice def __UpperCAmelCase ( lowercase ): """simple docstring""" return choice(lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random_pivot(lowercase ) # partition based on pivot # linear time _UpperCAmelCase = [e for e in lst if e < pivot] _UpperCAmelCase = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(lowercase ) == k - 1: return pivot # pivot is in elements bigger than k elif len(lowercase ) < k - 1: return kth_number(lowercase ,k - len(lowercase ) - 1 ) # pivot is in elements smaller than k else: return kth_number(lowercase ,lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
289
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
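# A small, runnable illustration (added) of the default masks built by
# prepare_pegasus_inputs_dict above: attention masks are 1 wherever ids differ from
# the pad token, and the decoder mask additionally keeps position 0 unconditionally.
import numpy as np

pad_token_id = 0  # illustrative value; the tester uses config.pad_token_id
decoder_input_ids = np.array([[2, 5, 7, 0, 0]])
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
assert decoder_attention_mask.tolist() == [[1, 1, 1, 0, 0]]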
289
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class a ( lowerCAmelCase_ ): _snake_case : int = 'donut-swin' _snake_case : Union[str, Any] = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : List[Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[3, 6, 12, 24] , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Tuple=4.0 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : Union[str, Any]=1e-5 , **__lowerCAmelCase : Optional[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(__lowerCAmelCase ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
289
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
1
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
1
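# Illustrative invocation of the TAPAS conversion script above. The script
# file name and all paths are placeholders; the flags come directly from its
# argparse definition:
#
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output_dir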
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
289
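# A minimal sketch of exercising MobileViTImageProcessor the way the test
# above does; the size/crop_size values mirror the tester defaults, the
# random array stands in for a real image, and torch is assumed to be
# installed for return_tensors="pt".
import numpy as np
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
image = np.random.randint(0, 255, size=(3, 30, 400), dtype=np.uint8)
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])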
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
289
1
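# The core trick in the benchmark above is a decorator factory that wraps a
# callable either as plain eager-mode Python or as a compiled tf.function
# (optionally through XLA). A minimal self-contained sketch of that pattern;
# ``experimental_compile`` is the spelling used in the sample (newer TF
# versions call it ``jit_compile``), and the sample additionally rejects the
# eager+XLA combination, which this sketch omits.
import tensorflow as tf


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        if do_eager_mode:
            return func  # leave the callable as-is; it runs eagerly
        return tf.function(func, experimental_compile=use_xla)  # graph / XLA

    return run_func


@run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
def square(x):
    return tf.matmul(x, x)


print(square(tf.ones((2, 2))))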
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
289
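# A minimal sketch of the end-to-end usage that the slow generation test above
# checks: summarizing text with the public google/pegasus-xsum checkpoint.
# Assumes network access and the flax extras are installed.
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

inputs = tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    return_tensors="np",
    truncation=True,
    max_length=512,
    padding=True,
)
summary_ids = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))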
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
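# Worked examples for the backtracking counter above, using ``solve`` as
# reconstructed there: 13 has exactly one representation as a sum of distinct
# squares (4 + 9), while 100 has three (100, 36 + 64, and 1 + 9 + 16 + 25 + 49).
print(solve(13, 2))   # 1
print(solve(100, 2))  # 3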
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = BlipImageProcessor() _UpperCAmelCase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) _UpperCAmelCase = BlipaProcessor(__lowerCAmelCase , __lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self : Dict , **__lowerCAmelCase : Dict ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer def lowerCAmelCase_ ( self : Dict , **__lowerCAmelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor def lowerCAmelCase_ ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _UpperCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _UpperCAmelCase = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""" ) _UpperCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = """lower newer""" _UpperCAmelCase = processor(text=__lowerCAmelCase ) _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) 
_UpperCAmelCase = """lower newer""" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(__lowerCAmelCase ) _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = """lower newer""" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
289
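# A minimal sketch of the joint text+image call the processor tests above
# cover: a Blip2Processor bundles a BlipImageProcessor and a GPT-2 tokenizer,
# so a single call yields pixel_values plus input_ids/attention_mask. Assumes
# network access for the tiny test tokenizer.
import numpy as np
from PIL import Image
from transformers import Blip2Processor, BlipImageProcessor, GPT2Tokenizer

processor = Blip2Processor(
    image_processor=BlipImageProcessor(),
    tokenizer=GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
)
image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']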
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
289
1
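# The subtle part of the conversion above is the kernel-axis reordering
# between frameworks: Keras stores conv kernels as (H, W, C_in, C_out) and
# depthwise kernels as (H, W, C, depth_multiplier), while PyTorch expects
# (C_out, C_in, H, W). A small sketch of the two permutes used above:
import numpy as np
import torch

tf_conv_kernel = np.zeros((3, 3, 32, 64))  # H, W, C_in, C_out
pt_conv_weight = torch.from_numpy(tf_conv_kernel).permute(3, 2, 0, 1)
print(pt_conv_weight.shape)  # torch.Size([64, 32, 3, 3])

tf_dw_kernel = np.zeros((3, 3, 32, 1))  # H, W, C, depth multiplier
pt_dw_weight = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
print(pt_dw_weight.shape)  # torch.Size([32, 1, 3, 3])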
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = len(lowercase ) # No of vertices in graph _UpperCAmelCase = [0] * n _UpperCAmelCase = [False] * n def dfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = True _UpperCAmelCase = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(lowercase ,lowercase ,lowercase ,id_ ) _UpperCAmelCase = min(low[at] ,low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge _UpperCAmelCase = min(low[at] ,low[to] ) _UpperCAmelCase = [] for i in range(lowercase ): if not visited[i]: dfs(lowercase ,-1 ,lowercase ,id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
289
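# Example run on the first demo graph above, using the names reconstructed
# there: the triangle 0-1-2 and the cycle 5-6-7-8 contain no bridges, so only
# the connecting edges are reported.
print(compute_bridges(get_demo_graph(0)))  # [(3, 4), (2, 3), (2, 5)]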
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
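# Example run of the random walk above, using the names reconstructed there.
# With these transition probabilities the stationary distribution favours "a"
# over "b" five to one, so visit counts should be roughly 5:1 for a long walk
# (exact numbers vary per run because transition() draws from random()).
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
print(get_transitions("a", transitions, 5000))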
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = ['pixel_values'] def __init__( self : Dict , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Dict[str, int]] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 255 , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , **__lowerCAmelCase : Tuple , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = size if size is not None else {"""height""": 224, """width""": 224} _UpperCAmelCase = get_size_dict(__lowerCAmelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _UpperCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase , param_name="""crop_size""" ) _UpperCAmelCase = do_resize _UpperCAmelCase = do_rescale _UpperCAmelCase = do_normalize _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Optional[Any] , ): _UpperCAmelCase = get_size_dict(__lowerCAmelCase ) if "shortest_edge" in size: _UpperCAmelCase = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _UpperCAmelCase = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Any , ): _UpperCAmelCase = get_size_dict(__lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : List[str] ): return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Dict , ): return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : ImageInput , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : int = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[float] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowerCAmelCase : Union[str, Any] , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" , default_to_square=__lowerCAmelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(__lowerCAmelCase ) if not is_batched(__lowerCAmelCase ): _UpperCAmelCase = [images] if not valid_images(__lowerCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. 
_UpperCAmelCase = [to_numpy_array(__lowerCAmelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images] _UpperCAmelCase = {"""pixel_values""": images} return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
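# A minimal, self-contained sketch of the rescale -> normalize -> channels-first
# steps applied in preprocess() above; the array shape and the ImageNet
# mean/std values are illustrative assumptions, not taken from the class itself.
import numpy as np

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
pixels = image * (1 / 255)                        # rescale step
pixels = (pixels - IMAGENET_MEAN) / IMAGENET_STD  # normalize step
pixels = pixels.transpose(2, 0, 1)                # ChannelDimension.FIRST
print(pixels.shape)  # (3, 224, 224)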
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = 'beit' def __init__( self : Optional[Any] , __lowerCAmelCase : Any=8192 , __lowerCAmelCase : Tuple=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[int]=12 , __lowerCAmelCase : Union[str, Any]=3072 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : int=1e-1_2 , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Union[str, Any]=16 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Any=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[int]=[3, 5, 7, 11] , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Optional[int]=0.4 , __lowerCAmelCase : List[Any]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[Any]=255 , **__lowerCAmelCase : List[str] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class a ( lowerCAmelCase_ ): _snake_case : List[Any] = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Tuple ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : Optional[Any] ): return 1e-4
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a ( lowerCAmelCase_ ): _snake_case : Tuple = ['image_processor', 'tokenizer'] _snake_case : Any = 'CLIPImageProcessor' _snake_case : List[Any] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __lowerCAmelCase , ) _UpperCAmelCase = kwargs.pop("""feature_extractor""" ) _UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) def __call__( self : Optional[Any] , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : List[str] ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _UpperCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if images is not None: _UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None and images is not None: _UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ): return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ): return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.tokenizer.model_input_names _UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" if isinstance(lowercase ,lowercase ): raise TypeError("""'float' object cannot be interpreted as an integer""" ) if isinstance(lowercase ,lowercase ): raise TypeError("""'str' object cannot be interpreted as an integer""" ) if num == 0: return "0b0" _UpperCAmelCase = False if num < 0: _UpperCAmelCase = True _UpperCAmelCase = -num _UpperCAmelCase = [] while num > 0: binary.insert(0 ,num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(lowercase ) for e in binary ) return "0b" + "".join(str(lowercase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)] def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = os.path.dirname(os.path.realpath(lowercase ) ) _UpperCAmelCase = os.path.join(lowercase ,"""words.txt""" ) _UpperCAmelCase = """""" with open(lowercase ) as f: _UpperCAmelCase = f.readline() _UpperCAmelCase = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] _UpperCAmelCase = [ word for word in [sum(ord(lowercase ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(lowercase ) if __name__ == "__main__": print(solution())
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 UpperCAmelCase__ = data_utils.TransfoXLTokenizer UpperCAmelCase__ = data_utils.TransfoXLCorpus UpperCAmelCase__ = data_utils UpperCAmelCase__ = data_utils def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowercase ,"""rb""" ) as fp: _UpperCAmelCase = pickle.load(lowercase ,encoding="""latin1""" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) _UpperCAmelCase = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""] print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' ) _UpperCAmelCase = corpus.vocab.__dict__ torch.save(lowercase ,lowercase ) _UpperCAmelCase = corpus.__dict__ corpus_dict_no_vocab.pop("""vocab""" ,lowercase ) _UpperCAmelCase = pytorch_dump_folder_path + """/""" + CORPUS_NAME print(f'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(lowercase ,lowercase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model _UpperCAmelCase = os.path.abspath(lowercase ) _UpperCAmelCase = os.path.abspath(lowercase ) print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": _UpperCAmelCase = TransfoXLConfig() else: _UpperCAmelCase = TransfoXLConfig.from_json_file(lowercase ) print(f'''Building PyTorch model from configuration: {config}''' ) _UpperCAmelCase = TransfoXLLMHeadModel(lowercase ) _UpperCAmelCase = load_tf_weights_in_transfo_xl(lowercase ,lowercase ,lowercase ) # Save pytorch-model _UpperCAmelCase = os.path.join(lowercase ,lowercase ) _UpperCAmelCase = os.path.join(lowercase ,lowercase ) print(f'''Save PyTorch model to {os.path.abspath(lowercase )}''' ) torch.save(model.state_dict() ,lowercase ) print(f'''Save configuration file to {os.path.abspath(lowercase )}''' ) with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) UpperCAmelCase__ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
"""simple docstring""" import math import sys def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = """""" try: with open(lowercase ,"""rb""" ) as binary_file: _UpperCAmelCase = binary_file.read() for dat in data: _UpperCAmelCase = f'''{dat:08b}''' result += curr_byte return result except OSError: print("""File not accessible""" ) sys.exit() def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {"""0""": """0""", """1""": """1"""} _UpperCAmelCase , _UpperCAmelCase = """""", """""" _UpperCAmelCase = len(lowercase ) for i in range(len(lowercase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue _UpperCAmelCase = lexicon[curr_string] result += last_match_id _UpperCAmelCase = last_match_id + """0""" if math.loga(lowercase ).is_integer(): _UpperCAmelCase = {} for curr_key in list(lowercase ): _UpperCAmelCase = lexicon.pop(lowercase ) _UpperCAmelCase = new_lex _UpperCAmelCase = last_match_id + """1""" index += 1 _UpperCAmelCase = """""" return result def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = 8 try: with open(lowercase ,"""wb""" ) as opened_file: _UpperCAmelCase = [ to_write[i : i + byte_length] for i in range(0 ,len(lowercase ) ,lowercase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("""10000000""" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(lowercase ,2 ).to_bytes(1 ,byteorder="""big""" ) ) except OSError: print("""File not accessible""" ) sys.exit() def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = 0 for letter in data_bits: if letter == "1": break counter += 1 _UpperCAmelCase = data_bits[counter:] _UpperCAmelCase = data_bits[counter + 1 :] return data_bits def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = read_file_binary(lowercase ) _UpperCAmelCase = remove_prefix(lowercase ) _UpperCAmelCase = decompress_data(lowercase ) write_file_binary(lowercase ,lowercase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
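# ---------------------------------------------------------------------------
# Not part of the script above: a minimal sketch of the retry-on-OOM behaviour
# that `find_executable_batch_size` provides, isolated from the GLUE training
# loop. The out-of-memory condition is simulated here; the RuntimeError message
# mimics the one the decorator looks for, and the threshold of 32 is arbitrary.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Pretend anything above 32 samples per batch exhausts device memory.
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")  # caught; retried with batch_size // 2
    print(f"training succeeded with batch_size={batch_size}")


train()  # retries 128 -> 64 -> 32, then prints the success message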
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable UpperCAmelCase__ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["""DPTFeatureExtractor"""] UpperCAmelCase__ = ["""DPTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DPTForDepthEstimation""", """DPTForSemanticSegmentation""", """DPTModel""", """DPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Tuple = KandinskyVaaControlnetPipeline _snake_case : Optional[Any] = ['image_embeds', 'negative_image_embeds', 'hint'] _snake_case : Any = ['image_embeds', 'negative_image_embeds', 'hint'] _snake_case : Any = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _snake_case : Tuple = False @property def lowerCAmelCase_ ( self : List[Any] ): return 32 @property def lowerCAmelCase_ ( self : Optional[int] ): return 32 @property def lowerCAmelCase_ ( self : Dict ): return self.time_input_dim @property def lowerCAmelCase_ ( self : Optional[Any] ): return self.time_input_dim * 4 @property def lowerCAmelCase_ ( self : List[str] ): return 100 @property def lowerCAmelCase_ ( self : int ): torch.manual_seed(0 ) _UpperCAmelCase = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _UpperCAmelCase = UNetaDConditionModel(**__lowerCAmelCase ) return model @property def lowerCAmelCase_ ( self : Dict ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) _UpperCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.dummy_unet _UpperCAmelCase = self.dummy_movq _UpperCAmelCase = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , ) _UpperCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=0 ): _UpperCAmelCase = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCAmelCase ) # create hint _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = """cpu""" _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) _UpperCAmelCase = output.images _UpperCAmelCase = pipe( **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) _UpperCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) _UpperCAmelCase = torch.from_numpy(np.array(__lowerCAmelCase ) ).float() / 255.0 _UpperCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) _UpperCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCAmelCase ) _UpperCAmelCase = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipeline.to(__lowerCAmelCase ) pipeline.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = """A robot, 4k photo""" _UpperCAmelCase = torch.Generator(device="""cuda""" ).manual_seed(0 ) _UpperCAmelCase , _UpperCAmelCase = pipe_prior( __lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _UpperCAmelCase = torch.Generator(device="""cuda""" ).manual_seed(0 ) _UpperCAmelCase = pipeline( image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , hint=__lowerCAmelCase , generator=__lowerCAmelCase , 
num_inference_steps=100 , output_type="""np""" , ) _UpperCAmelCase = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
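# Not part of the test above: a self-contained sketch of the hint preprocessing
# the slow test performs, turning an HxWx3 uint8 image into the 1x3xHxW float
# tensor the ControlNet pipeline expects. The zero image is a stand-in for the
# real depth-hint image.
import numpy as np
import torch

image = np.zeros((64, 64, 3), dtype=np.uint8)   # stand-in for the depth-hint image
hint = torch.from_numpy(image).float() / 255.0  # HxWx3, values in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)       # 1x3xHxW, matching the `hint` input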
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
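# Not part of the tests above: a minimal forward pass with a tiny UNet2DModel,
# mirroring the dummy config of the first test class. Assumes `diffusers` and
# `torch` are installed; the exact channel sizes are the test's, not required.
import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(1, 3, 32, 32)
timestep = torch.tensor([10])
with torch.no_grad():
    sample = model(noise, timestep).sample  # same 1x3x32x32 shape as the input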
"""simple docstring""" import math UpperCAmelCase__ = 1_0 UpperCAmelCase__ = 7 UpperCAmelCase__ = BALLS_PER_COLOUR * NUM_COLOURS def __UpperCAmelCase ( lowercase = 20 ): """simple docstring""" _UpperCAmelCase = math.comb(lowercase ,lowercase ) _UpperCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,lowercase ) _UpperCAmelCase = NUM_COLOURS * (1 - missing_colour / total) return f'''{result:.9f}''' if __name__ == "__main__": print(solution(2_0))
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
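# Not part of the tests above: the peak-memory measurement pattern used by the
# last test, extracted for reuse around any CUDA workload. Requires a CUDA
# device; the 7 GB bound in the test is specific to that pipeline.
import torch

torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
# ... run the pipeline or any other CUDA workload here ...
mem_bytes = torch.cuda.max_memory_allocated()
print(f"peak allocation: {mem_bytes / 2**30:.2f} GiB")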
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def __UpperCAmelCase ( lowercase ,lowercase=0 ): """simple docstring""" return sorted(lowercase ,key=lambda lowercase : x[column] ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=float("""inf""" ) ): """simple docstring""" for i in range(points_counts - 1 ): for j in range(i + 1 ,lowercase ): _UpperCAmelCase = euclidean_distance_sqr(points[i] ,points[j] ) if current_dis < min_dis: _UpperCAmelCase = current_dis return min_dis def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=float("""inf""" ) ): """simple docstring""" for i in range(min(6 ,points_counts - 1 ) ,lowercase ): for j in range(max(0 ,i - 6 ) ,lowercase ): _UpperCAmelCase = euclidean_distance_sqr(points[i] ,points[j] ) if current_dis < min_dis: _UpperCAmelCase = current_dis return min_dis def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" # base case if points_counts <= 3: return dis_between_closest_pair(lowercase ,lowercase ) # recursion _UpperCAmelCase = points_counts // 2 _UpperCAmelCase = closest_pair_of_points_sqr( lowercase ,points_sorted_on_y[:mid] ,lowercase ) _UpperCAmelCase = closest_pair_of_points_sqr( lowercase ,points_sorted_on_y[mid:] ,points_counts - mid ) _UpperCAmelCase = min(lowercase ,lowercase ) _UpperCAmelCase = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(lowercase ) _UpperCAmelCase = dis_between_closest_in_strip( lowercase ,len(lowercase ) ,lowercase ) return min(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = column_based_sort(lowercase ,column=0 ) _UpperCAmelCase = column_based_sort(lowercase ,column=1 ) return ( closest_pair_of_points_sqr( lowercase ,lowercase ,lowercase ) ) ** 0.5 if __name__ == "__main__": UpperCAmelCase__ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __UpperCAmelCase ( lowercase ): """simple docstring""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def __UpperCAmelCase ( lowercase ): """simple docstring""" # word like '180' or '身高' or '神' for char in word: _UpperCAmelCase = ord(lowercase ) if not _is_chinese_char(lowercase ): return 0 return 1 def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = set() for token in tokens: _UpperCAmelCase = len(lowercase ) > 1 and is_chinese(lowercase ) if chinese_word: word_set.add(lowercase ) _UpperCAmelCase = list(lowercase ) return word_list def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not chinese_word_set: return bert_tokens _UpperCAmelCase = max([len(lowercase ) for w in chinese_word_set] ) _UpperCAmelCase = bert_tokens _UpperCAmelCase , _UpperCAmelCase = 0, len(lowercase ) while start < end: _UpperCAmelCase = True if is_chinese(bert_word[start] ): _UpperCAmelCase = min(end - start ,lowercase ) for i in range(lowercase ,1 ,-1 ): _UpperCAmelCase = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): _UpperCAmelCase = """##""" + bert_word[j] _UpperCAmelCase = start + i _UpperCAmelCase = False break if single_word: start += 1 return bert_word def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [] for i in range(0 ,len(lowercase ) ,1_00 ): _UpperCAmelCase = ltp_tokenizer.seg(lines[i : i + 1_00] )[0] _UpperCAmelCase = [get_chinese_word(lowercase ) for r in res] ltp_res.extend(lowercase ) assert len(lowercase ) == len(lowercase ) _UpperCAmelCase = [] for i in range(0 ,len(lowercase ) ,1_00 ): _UpperCAmelCase = bert_tokenizer(lines[i : i + 1_00] ,add_special_tokens=lowercase ,truncation=lowercase ,max_length=5_12 ) bert_res.extend(res["""input_ids"""] ) assert len(lowercase ) == len(lowercase ) _UpperCAmelCase = [] for input_ids, chinese_word in zip(lowercase ,lowercase ): _UpperCAmelCase = [] for id in input_ids: _UpperCAmelCase = bert_tokenizer._convert_id_to_token(lowercase ) input_tokens.append(lowercase ) _UpperCAmelCase = add_sub_symbol(lowercase ,lowercase ) _UpperCAmelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowercase ): if token[:2] == "##": _UpperCAmelCase = token[2:] # save chinese tokens' pos if len(lowercase ) == 1 and _is_chinese_char(ord(lowercase ) ): ref_id.append(lowercase ) ref_ids.append(lowercase ) assert len(lowercase ) == len(lowercase ) return ref_ids def __UpperCAmelCase ( lowercase ): """simple docstring""" # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name ,"""r""" ,encoding="""utf-8""" ) as f: _UpperCAmelCase = f.readlines() _UpperCAmelCase = [line.strip() for line in data if len(lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _UpperCAmelCase = LTP(args.ltp ) # faster in GPU device _UpperCAmelCase = BertTokenizer.from_pretrained(args.bert ) _UpperCAmelCase = prepare_ref(lowercase ,lowercase ,lowercase ) with open(args.save_path ,"""w""" ,encoding="""utf-8""" ) as f: _UpperCAmelCase = [json.dumps(lowercase ) + """\n""" for ref in ref_ids] f.writelines(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""" ) parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""") parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""") UpperCAmelCase__ = parser.parse_args() main(args)
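# Not part of the script above, and assuming the helpers keep their upstream
# names (`_is_chinese_char`, `is_chinese`): a quick illustration of the CJK
# range check described in the comments.
print(_is_chinese_char(ord("中")))  # True: U+4E2D sits in the main CJK block
print(_is_chinese_char(ord("あ")))  # False: Hiragana is outside the listed ranges
print(is_chinese("身高"))            # 1: every character is a CJK ideograph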
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): def __init__( self : List[str] , *__lowerCAmelCase : str , **__lowerCAmelCase : str ): warnings.warn( """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use SegformerImageProcessor instead.""" , __lowerCAmelCase , ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" # Construct model if gpta_config_file == "": _UpperCAmelCase = GPTaConfig() else: _UpperCAmelCase = GPTaConfig.from_json_file(lowercase ) _UpperCAmelCase = GPTaModel(lowercase ) # Load weights from numpy load_tf_weights_in_gpta(lowercase ,lowercase ,lowercase ) # Save pytorch-model _UpperCAmelCase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME _UpperCAmelCase = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict() ,lowercase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) UpperCAmelCase__ = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: _UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) else: _UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) _UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""] _UpperCAmelCase = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: _UpperCAmelCase = key.split(""".""" ) if attributes[0] == "lm_head": _UpperCAmelCase = prophet _UpperCAmelCase = prophet_old else: _UpperCAmelCase = prophet.prophetnet _UpperCAmelCase = prophet_old.model _UpperCAmelCase = False for attribute in attributes: if attribute in mapping: _UpperCAmelCase = mapping[attribute] if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0: _UpperCAmelCase = attribute elif hasattr(lowercase ,lowercase ): _UpperCAmelCase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _UpperCAmelCase = old_model.weight logger.info(f'''{attribute} is initialized.''' ) _UpperCAmelCase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_UpperCAmelCase = old_model.bias logger.info(f'''{attribute} is initialized''' ) _UpperCAmelCase = True break elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ): _UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3 _UpperCAmelCase = getattr(lowercase ,lowercase ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _UpperCAmelCase = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." _UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) _UpperCAmelCase = True break if attribute.isdigit(): _UpperCAmelCase = model[int(lowercase )] _UpperCAmelCase = old_model[int(lowercase )] else: _UpperCAmelCase = getattr(lowercase ,lowercase ) if old_attribute == "": _UpperCAmelCase = old_model else: if not hasattr(lowercase ,lowercase ): raise ValueError(f'''{old_model} does not have {old_attribute}''' ) _UpperCAmelCase = getattr(lowercase ,lowercase ) if not is_key_init: raise ValueError(f'''{key} was not correctly initialized!''' ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
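# Not part of the script above: a self-contained sketch of the q/k/v split the
# conversion performs, where legacy checkpoints store one fused in_proj matrix.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # rows stacked as [q; k; v]
q = in_proj_weight[:embed_dim, :]
k = in_proj_weight[embed_dim : 2 * embed_dim, :]
v = in_proj_weight[2 * embed_dim :, :]
assert q.shape == k.shape == v.shape == (embed_dim, embed_dim)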
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
289
1
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a ( nn.Module ): def __init__( self : Optional[int] ): super().__init__() _UpperCAmelCase = nn.Linear(3 , 4 ) _UpperCAmelCase = nn.BatchNormad(4 ) _UpperCAmelCase = nn.Linear(4 , 5 ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : str ): return self.lineara(self.batchnorm(self.lineara(__lowerCAmelCase ) ) ) class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(__lowerCAmelCase , model.state_dict() ) _UpperCAmelCase = os.path.join(__lowerCAmelCase , """index.json""" ) self.assertTrue(os.path.isfile(__lowerCAmelCase ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: _UpperCAmelCase = os.path.join(__lowerCAmelCase , f'''{key}.dat''' ) self.assertTrue(os.path.isfile(__lowerCAmelCase ) ) # TODO: add tests on the fact weights are properly loaded def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: _UpperCAmelCase = torch.randn(2 , 3 , dtype=__lowerCAmelCase ) with TemporaryDirectory() as tmp_dir: _UpperCAmelCase = offload_weight(__lowerCAmelCase , """weight""" , __lowerCAmelCase , {} ) _UpperCAmelCase = os.path.join(__lowerCAmelCase , """weight.dat""" ) self.assertTrue(os.path.isfile(__lowerCAmelCase ) ) self.assertDictEqual(__lowerCAmelCase , {"""weight""": {"""shape""": [2, 3], """dtype""": str(__lowerCAmelCase ).split(""".""" )[1]}} ) _UpperCAmelCase = load_offloaded_weight(__lowerCAmelCase , index["""weight"""] ) self.assertTrue(torch.equal(__lowerCAmelCase , __lowerCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ModelForTest() _UpperCAmelCase = model.state_dict() _UpperCAmelCase = {k: v for k, v in state_dict.items() if """linear2""" not in k} _UpperCAmelCase = {k: v for k, v in state_dict.items() if """linear2""" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = OffloadedWeightsLoader(state_dict=__lowerCAmelCase , save_folder=__lowerCAmelCase ) # Every key is there with the right value self.assertEqual(sorted(__lowerCAmelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__lowerCAmelCase , weight_map[key] ) ) _UpperCAmelCase = {k: v for k, v in state_dict.items() if """weight""" in k} _UpperCAmelCase = {k: v for k, v in state_dict.items() if """weight""" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = OffloadedWeightsLoader(state_dict=__lowerCAmelCase , save_folder=__lowerCAmelCase ) # Every key is there with the right value self.assertEqual(sorted(__lowerCAmelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__lowerCAmelCase , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(__lowerCAmelCase , __lowerCAmelCase ) # Duplicates are removed _UpperCAmelCase = OffloadedWeightsLoader(state_dict=__lowerCAmelCase , save_folder=__lowerCAmelCase ) # Every key is there with the right value self.assertEqual(sorted(__lowerCAmelCase ) , 
sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__lowerCAmelCase , weight_map[key] ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = {"""a.1""": 0, """a.10""": 1, """a.2""": 2} _UpperCAmelCase = extract_submodules_state_dict(__lowerCAmelCase , ["""a.1""", """a.2"""] ) self.assertDictEqual(__lowerCAmelCase , {"""a.1""": 0, """a.2""": 2} ) _UpperCAmelCase = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2} _UpperCAmelCase = extract_submodules_state_dict(__lowerCAmelCase , ["""a.1""", """a.2"""] ) self.assertDictEqual(__lowerCAmelCase , {"""a.1.a""": 0, """a.2.a""": 2} )
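# Illustrative round-trip (a sketch, separate from the tests above), using the
# same accelerate helpers the tests exercise and assuming the signatures shown
# there: offload a state dict to disk, then read one tensor back through the
# loader. The toy module is made up.
import tempfile

import torch
import torch.nn as nn

from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

_tiny = nn.Linear(2, 2)
with tempfile.TemporaryDirectory() as _tmp_dir:
    offload_state_dict(_tmp_dir, _tiny.state_dict())
    _loader = OffloadedWeightsLoader(save_folder=_tmp_dir)
    # Tensors come back equal to what was offloaded.
    assert torch.equal(_loader["weight"], _tiny.state_dict()["weight"])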
289
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
1
"""simple docstring""" from datetime import datetime as dt import os from github import Github UpperCAmelCase__ = [ """good first issue""", """good second issue""", """good difficult issue""", """feature request""", """new model""", """wip""", ] def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = Github(os.environ["""GITHUB_TOKEN"""] ) _UpperCAmelCase = g.get_repo("""huggingface/transformers""" ) _UpperCAmelCase = repo.get_issues(state="""open""" ) for issue in open_issues: _UpperCAmelCase = sorted([comment for comment in issue.get_comments()] ,key=lambda lowercase : i.created_at ,reverse=lowercase ) _UpperCAmelCase = comments[0] if len(lowercase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="""closed""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
289
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
1
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
289
1
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ = """src/diffusers""" UpperCAmelCase__ = """.""" # This is to make sure the diffusers module imported is the one in the repo. UpperCAmelCase__ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCAmelCase__ = spec.loader.load_module() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return line.startswith(lowercase ) or len(lowercase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" ,lowercase ) is not None def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = object_name.split(""".""" ) _UpperCAmelCase = 0 # First let's find the module where our object lives. _UpperCAmelCase = parts[i] while i < len(lowercase ) and not os.path.isfile(os.path.join(lowercase ,f'''{module}.py''' ) ): i += 1 if i < len(lowercase ): _UpperCAmelCase = os.path.join(lowercase ,parts[i] ) if i >= len(lowercase ): raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(lowercase ,f'''{module}.py''' ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.readlines() # Now let's find the class / func in the code! _UpperCAmelCase = """""" _UpperCAmelCase = 0 for name in parts[i + 1 :]: while ( line_index < len(lowercase ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' ,lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(lowercase ): raise ValueError(f''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). _UpperCAmelCase = line_index while line_index < len(lowercase ) and _should_continue(lines[line_index] ,lowercase ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 _UpperCAmelCase = lines[start_index:line_index] return "".join(lowercase ) UpperCAmelCase__ = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") UpperCAmelCase__ = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") UpperCAmelCase__ = re.compile(r"""<FILL\s+[^>]*>""") def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = code.split("""\n""" ) _UpperCAmelCase = 0 while idx < len(lowercase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(lowercase ): return re.search(R"""^(\s*)\S""" ,lines[idx] ).groups()[0] return "" def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = len(get_indent(lowercase ) ) > 0 if has_indent: _UpperCAmelCase = f'''class Bla:\n{code}''' _UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_19 ,preview=lowercase ) _UpperCAmelCase = black.format_str(lowercase ,mode=lowercase ) _UpperCAmelCase , _UpperCAmelCase = style_docstrings_in_code(lowercase ) return result[len("""class Bla:\n""" ) :] if has_indent else result def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" with open(lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.readlines() _UpperCAmelCase = [] _UpperCAmelCase = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(lowercase ): _UpperCAmelCase = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = search.groups() _UpperCAmelCase = find_code_in_diffusers(lowercase ) _UpperCAmelCase = get_indent(lowercase ) _UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2 _UpperCAmelCase = theoretical_indent _UpperCAmelCase = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. _UpperCAmelCase = True while line_index < len(lowercase ) and should_continue: line_index += 1 if line_index >= len(lowercase ): break _UpperCAmelCase = lines[line_index] _UpperCAmelCase = _should_continue(lowercase ,lowercase ) and re.search(f'''^{indent}# End copy''' ,lowercase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _UpperCAmelCase = lines[start_index:line_index] _UpperCAmelCase = """""".join(lowercase ) # Remove any nested `Copied from` comments to avoid circular copies _UpperCAmelCase = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(lowercase ) is None] _UpperCAmelCase = """\n""".join(lowercase ) # Before comparing, use the `replace_pattern` on the original code. if len(lowercase ) > 0: _UpperCAmelCase = replace_pattern.replace("""with""" ,"""""" ).split(""",""" ) _UpperCAmelCase = [_re_replace_pattern.search(lowercase ) for p in patterns] for pattern in patterns: if pattern is None: continue _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pattern.groups() _UpperCAmelCase = re.sub(lowercase ,lowercase ,lowercase ) if option.strip() == "all-casing": _UpperCAmelCase = re.sub(obja.lower() ,obja.lower() ,lowercase ) _UpperCAmelCase = re.sub(obja.upper() ,obja.upper() ,lowercase ) # Blackify after replacement. 
To be able to do that, we need the header (class or function definition) # from the previous line _UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code ) _UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: _UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:] _UpperCAmelCase = start_index + 1 if overwrite and len(lowercase ) > 0: # Warn the user a file has been modified. print(f'''Detected changes, rewriting {filename}.''' ) with open(lowercase ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(lowercase ) return diffs def __UpperCAmelCase ( lowercase = False ): """simple docstring""" _UpperCAmelCase = glob.glob(os.path.join(lowercase ,"""**/*.py""" ) ,recursive=lowercase ) _UpperCAmelCase = [] for filename in all_files: _UpperCAmelCase = is_copy_consistent(lowercase ,lowercase ) diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(lowercase ) > 0: _UpperCAmelCase = """\n""".join(lowercase ) raise Exception( """Found the following copy inconsistencies:\n""" + diff + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase__ = parser.parse_args() check_copies(args.fix_and_overwrite)
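# Illustrative sketch of the comment convention matched above: `_re_copy_warning`
# matches annotations of the form below, and the optional trailing clause carries
# the `with A->B` rewrite patterns. The class names here are invented purely to
# show the shape of the annotation.
import re

_re_demo = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_line = "    # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->My"
_match = _re_demo.search(_line)
assert _match is not None
_indent, _object_name, _replace_clause = _match.groups()
assert _object_name == "models.attention.BasicTransformerBlock"
assert _replace_clause == "with Basic->My"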
289
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'dpt' def __init__( self : Union[str, Any] , __lowerCAmelCase : int=768 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : List[str]=1e-1_2 , __lowerCAmelCase : Any=384 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : int=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=[2, 5, 8, 11] , __lowerCAmelCase : List[Any]="project" , __lowerCAmelCase : str=[4, 2, 1, 0.5] , __lowerCAmelCase : Union[str, Any]=[96, 192, 384, 768] , __lowerCAmelCase : Tuple=256 , __lowerCAmelCase : Any=-1 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=0.4 , __lowerCAmelCase : List[Any]=255 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : List[str]=[1, 1024, 24, 24] , __lowerCAmelCase : Union[str, Any]=[0, 1] , __lowerCAmelCase : str=None , **__lowerCAmelCase : Optional[int] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("""Initializing the config with a `BiT` backbone.""" ) _UpperCAmelCase = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, } _UpperCAmelCase = BitConfig(**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): logger.info("""Initializing the config with a `BiT` backbone.""" ) _UpperCAmelCase = BitConfig(**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) _UpperCAmelCase = backbone_featmap_shape _UpperCAmelCase = neck_ignore_stages if readout_type != "project": raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" ) else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = [] _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = qkv_bias _UpperCAmelCase = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" ) _UpperCAmelCase = readout_type _UpperCAmelCase = reassemble_factors _UpperCAmelCase = neck_hidden_sizes _UpperCAmelCase = fusion_hidden_size _UpperCAmelCase = head_in_index _UpperCAmelCase = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = 
use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = semantic_loss_ignore_index _UpperCAmelCase = semantic_classifier_dropout def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
289
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
289
1
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" def update_area_of_max_square(lowercase ,lowercase ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 _UpperCAmelCase = update_area_of_max_square(lowercase ,col + 1 ) _UpperCAmelCase = update_area_of_max_square(row + 1 ,col + 1 ) _UpperCAmelCase = update_area_of_max_square(row + 1 ,lowercase ) if mat[row][col]: _UpperCAmelCase = 1 + min([right, diagonal, down] ) _UpperCAmelCase = max(largest_square_area[0] ,lowercase ) return sub_problem_sol else: return 0 _UpperCAmelCase = [0] update_area_of_max_square(0 ,0 ) return largest_square_area[0] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" def update_area_of_max_square_using_dp_array( lowercase ,lowercase ,lowercase ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] _UpperCAmelCase = update_area_of_max_square_using_dp_array(lowercase ,col + 1 ,lowercase ) _UpperCAmelCase = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,lowercase ) _UpperCAmelCase = update_area_of_max_square_using_dp_array(row + 1 ,lowercase ,lowercase ) if mat[row][col]: _UpperCAmelCase = 1 + min([right, diagonal, down] ) _UpperCAmelCase = max(largest_square_area[0] ,lowercase ) _UpperCAmelCase = sub_problem_sol return sub_problem_sol else: return 0 _UpperCAmelCase = [0] _UpperCAmelCase = [[-1] * cols for _ in range(lowercase )] update_area_of_max_square_using_dp_array(0 ,0 ,lowercase ) return largest_square_area[0] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [[0] * (cols + 1) for _ in range(rows + 1 )] _UpperCAmelCase = 0 for row in range(rows - 1 ,-1 ,-1 ): for col in range(cols - 1 ,-1 ,-1 ): _UpperCAmelCase = dp_array[row][col + 1] _UpperCAmelCase = dp_array[row + 1][col + 1] _UpperCAmelCase = dp_array[row + 1][col] if mat[row][col] == 1: _UpperCAmelCase = 1 + min(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = max(dp_array[row][col] ,lowercase ) else: _UpperCAmelCase = 0 return largest_square_area def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [0] * (cols + 1) _UpperCAmelCase = [0] * (cols + 1) _UpperCAmelCase = 0 for row in range(rows - 1 ,-1 ,-1 ): for col in range(cols - 1 ,-1 ,-1 ): _UpperCAmelCase = current_row[col + 1] _UpperCAmelCase = next_row[col + 1] _UpperCAmelCase = next_row[col] if mat[row][col] == 1: _UpperCAmelCase = 1 + min(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = max(current_row[col] ,lowercase ) else: _UpperCAmelCase = 0 _UpperCAmelCase = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
"""simple docstring""" from typing import Any def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" _validation( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,) # Creates data structures and fill initial step _UpperCAmelCase = {} _UpperCAmelCase = {} for state in states_space: _UpperCAmelCase = observations_space[0] _UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) _UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 ,len(lowercase ) ): _UpperCAmelCase = observations_space[o] _UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function _UpperCAmelCase = """""" _UpperCAmelCase = -1 for k_state in states_space: _UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: _UpperCAmelCase = probability _UpperCAmelCase = k_state # Update probabilities and pointers dicts _UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) _UpperCAmelCase = arg_max # The final observation _UpperCAmelCase = observations_space[len(lowercase ) - 1] # argmax for given final observation _UpperCAmelCase = """""" _UpperCAmelCase = -1 for k_state in states_space: _UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: _UpperCAmelCase = probability _UpperCAmelCase = k_state _UpperCAmelCase = arg_max # Process pointers backwards _UpperCAmelCase = last_state _UpperCAmelCase = [] for o in range(len(lowercase ) - 1 ,-1 ,-1 ): result.append(lowercase ) _UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" _validate_not_empty( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,) _validate_lists(lowercase ,lowercase ) _validate_dicts( lowercase ,lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _validate_list(lowercase ,"""observations_space""" ) _validate_list(lowercase ,"""states_space""" ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not isinstance(_object ,lowercase ): _UpperCAmelCase = f'''{var_name} must be a list''' raise ValueError(lowercase ) else: for x in _object: if not isinstance(lowercase ,lowercase ): _UpperCAmelCase = f'''{var_name} must be a list of strings''' raise ValueError(lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,): """simple docstring""" _validate_dict(lowercase ,"""initial_probabilities""" ,lowercase ) _validate_nested_dict(lowercase ,"""transition_probabilities""" ) _validate_nested_dict(lowercase ,"""emission_probabilities""" ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _validate_dict(_object ,lowercase ,lowercase ) for x in _object.values(): _validate_dict(lowercase ,lowercase ,lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase 
,lowercase ,lowercase = False ): """simple docstring""" if not isinstance(_object ,lowercase ): _UpperCAmelCase = f'''{var_name} must be a dict''' raise ValueError(lowercase ) if not all(isinstance(lowercase ,lowercase ) for x in _object ): _UpperCAmelCase = f'''{var_name} all keys must be strings''' raise ValueError(lowercase ) if not all(isinstance(lowercase ,lowercase ) for x in _object.values() ): _UpperCAmelCase = """nested dictionary """ if nested else """""" _UpperCAmelCase = f'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
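# A worked example for viterbi() above, using the classic healthy/fever HMM.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))
# -> ['Healthy', 'Healthy', 'Fever']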
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
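# The decorator pattern used above, in isolation: accelerate's
# find_executable_batch_size retries the wrapped function with a halved batch
# size whenever it raises an out-of-memory error. A minimal sketch; `train`
# here is a hypothetical stand-in, not part of the script above.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders and run the training loop at this batch size ...

train()  # called with no arguments; the decorator supplies batch_size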
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
"""simple docstring""" import pytest UpperCAmelCase__ = """__dummy_dataset1__""" UpperCAmelCase__ = """ import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def __UpperCAmelCase ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __UpperCAmelCase ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dataset_loading_script_name _UpperCAmelCase = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowercase ) _UpperCAmelCase = script_dir / f'''{script_name}.py''' with open(lowercase ,"""w""" ) as f: f.write(lowercase ) return str(lowercase )
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig UpperCAmelCase__ = logging.getLogger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'masked_bert' def __init__( self : List[str] , __lowerCAmelCase : Any=3_0522 , __lowerCAmelCase : Dict=768 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : Optional[int]=12 , __lowerCAmelCase : str=3072 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Optional[int]=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Dict=1e-1_2 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : Optional[Any]="topK" , __lowerCAmelCase : str="constant" , __lowerCAmelCase : Dict=0.0 , **__lowerCAmelCase : Dict , ): super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = pruning_method _UpperCAmelCase = mask_init _UpperCAmelCase = mask_scale
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase__ = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Path , __lowerCAmelCase : Union[str, None] = None , __lowerCAmelCase : Union[List[str], None] = None , __lowerCAmelCase : Union[str, List[str], None] = None , __lowerCAmelCase : bool = True , ): _UpperCAmelCase = [file for file in os.listdir(__lowerCAmelCase ) if os.path.isfile(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )] if identifier is not None: _UpperCAmelCase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): for n_ in n_identifier: _UpperCAmelCase = [file for file in files if n_ not in file] else: _UpperCAmelCase = [file for file in files if n_identifier not in file] _UpperCAmelCase = ignore_files or [] ignore_files.append("""__init__.py""" ) _UpperCAmelCase = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __lowerCAmelCase ) if only_modules: _UpperCAmelCase = file.split(""".""" )[0] try: _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = doctest.DocTestSuite(__lowerCAmelCase ) _UpperCAmelCase = unittest.TextTestRunner().run(__lowerCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: _UpperCAmelCase = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = Path("""src/transformers""" ) _UpperCAmelCase = """modeling""" _UpperCAmelCase = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__lowerCAmelCase , identifier=__lowerCAmelCase , ignore_files=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = Path("""src/transformers""" ) _UpperCAmelCase = """tokenization""" self.analyze_directory(__lowerCAmelCase , identifier=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = Path("""src/transformers""" ) _UpperCAmelCase = """configuration""" self.analyze_directory(__lowerCAmelCase , identifier=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = Path("""src/transformers""" ) _UpperCAmelCase = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__lowerCAmelCase , n_identifier=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = Path("""docs/source""" ) _UpperCAmelCase = ["""favicon.ico"""] self.analyze_directory(__lowerCAmelCase , ignore_files=__lowerCAmelCase , only_modules=__lowerCAmelCase )
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
"""simple docstring""" from __future__ import annotations from math import pi def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if inductance < 0: raise ValueError("""Inductance cannot be negative""" ) if frequency < 0: raise ValueError("""Frequency cannot be negative""" ) if reactance < 0: raise ValueError("""Inductive reactance cannot be negative""" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
289
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
1
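The zero-argument dispatch in the reactance solver above is the relation X_L = 2 * pi * f * L rearranged three ways; a quick numeric sketch with illustrative values:

from math import pi

inductance, frequency = 35e-3, 1_000.0        # henries, hertz
reactance = 2 * pi * frequency * inductance    # the reactance == 0 branch
# The other two branches are the same relation solved for L and for f:
assert abs(inductance - reactance / (2 * pi * frequency)) < 1e-12
assert abs(frequency - reactance / (2 * pi * inductance)) < 1e-9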
"""simple docstring""" import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel UpperCAmelCase__ = False UpperCAmelCase__ = True UpperCAmelCase__ = False if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( """--repo_path""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = { """image_size""": """sample_size""", """num_res_blocks""": """layers_per_block""", """block_channels""": """block_out_channels""", """down_blocks""": """down_block_types""", """up_blocks""": """up_block_types""", """downscale_freq_shift""": """freq_shift""", """resnet_num_groups""": """norm_num_groups""", """resnet_act_fn""": """act_fn""", """resnet_eps""": """norm_eps""", """num_head_channels""": """attention_head_dim""", } UpperCAmelCase__ = { """time_steps""": """time_proj""", """mid""": """mid_block""", """downsample_blocks""": """down_blocks""", """upsample_blocks""": """up_blocks""", } UpperCAmelCase__ = """""" if has_file(args.repo_path, """config.json""") else """unet""" with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader: UpperCAmelCase__ = reader.read() UpperCAmelCase__ = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, """config.json"""): UpperCAmelCase__ = UNetaDModel(**config) else: UpperCAmelCase__ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel UpperCAmelCase__ = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) UpperCAmelCase__ = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: UpperCAmelCase__ = config[key] del config[key] UpperCAmelCase__ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]] UpperCAmelCase__ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]] if do_only_weights: UpperCAmelCase__ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin""")) UpperCAmelCase__ = {} for param_key, param_value in state_dict.items(): if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""): continue UpperCAmelCase__ = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(""".""")[0] == key: UpperCAmelCase__ = param_value UpperCAmelCase__ = True if not has_changed: UpperCAmelCase__ = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
289
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We can now define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also controls new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose """ """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 """ """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
289
1
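The decorator in the script above carries the whole out-of-memory story: on a CUDA OOM it frees memory, halves the batch size, and re-runs the wrapped function. A stripped-down sketch of the same pattern, runnable on its own wherever accelerate is installed:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def inner_training_loop(batch_size):
    # Everything that depends on the batch size (dataloaders, schedulers, ...)
    # must be built inside, because the decorator re-runs this whole function
    # with batch_size halved after each CUDA out-of-memory error.
    print(f"trying batch_size={batch_size}")

inner_training_loop()  # called with no arguments, exactly as in the script above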
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCAmelCase__ = 1_2_8_0_2_2 UpperCAmelCase__ = 1_2_8_0_2_8 @require_sentencepiece class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = MaMaaaTokenizer _snake_case : int = False _snake_case : List[str] = False _snake_case : Any = True def lowerCAmelCase_ ( self : List[Any] ): super().setUp() _UpperCAmelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] _UpperCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _UpperCAmelCase = Path(self.tmpdirname ) save_json(__lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] ) _UpperCAmelCase = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self : Union[str, Any] , **__lowerCAmelCase : str ): return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Any ): return ( "This is a test", "This is a test", ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = """</s>""" _UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<s>""" ) self.assertEqual(len(__lowerCAmelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("""Skip this test while all models are still to be uploaded.""" ) def lowerCAmelCase_ ( self : Tuple ): pass def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [2, 3, 4, 5, 6] , ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) _UpperCAmelCase = tokenizer.convert_tokens_to_string(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , """This is a test""" ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): # fmt: off _UpperCAmelCase = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 
1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): _snake_case : List[str] = 'facebook/m2m100_418M' _snake_case : Optional[int] = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] _snake_case : int = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off _snake_case : Optional[int] = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def lowerCAmelCase_ ( cls : Any ): _UpperCAmelCase = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" ) _UpperCAmelCase = 1 return cls def lowerCAmelCase_ ( self : Dict ): self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 12_8006 ) 
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 12_8022 ) self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 12_8076 ) self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 12_8063 ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.tokenizer.get_vocab() self.assertEqual(len(__lowerCAmelCase ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["""<unk>"""] , 3 ) self.assertIn(self.tokenizer.get_lang_token("""en""" ) , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = """en""" _UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids ) # fmt: off _UpperCAmelCase = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on _UpperCAmelCase = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) _UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = MaMaaaTokenizer.from_pretrained(__lowerCAmelCase ) self.assertDictEqual(new_tok.lang_token_to_id , __lowerCAmelCase ) @require_torch def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = """en""" _UpperCAmelCase = """fr""" _UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors="""pt""" ) _UpperCAmelCase = shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: _UpperCAmelCase = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = """mr""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) _UpperCAmelCase = """zh""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = """mr""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _UpperCAmelCase = """zh""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() 
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , { # en_XX, A, test, EOS """input_ids""": [[12_8022, 58, 4183, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 12_8006, } , )
289
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
1
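The language-id assertions above support the public translation path, where get_lang_id supplies forced_bos_token_id at generation time. A sketch using the checkpoint named in the test; the decoded output is not asserted here:

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("A test", return_tensors="pt")
# Decoding starts from the target-language token, mirroring the
# forced_bos_token_id returned by _build_translation_inputs in the test above.
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))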
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker UpperCAmelCase__ = """CompVis/stable-diffusion-v1-1""" UpperCAmelCase__ = """CompVis/stable-diffusion-v1-2""" UpperCAmelCase__ = """CompVis/stable-diffusion-v1-3""" UpperCAmelCase__ = """CompVis/stable-diffusion-v1-4""" class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : AutoencoderKL , __lowerCAmelCase : CLIPTextModel , __lowerCAmelCase : CLIPTokenizer , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCAmelCase : StableDiffusionSafetyChecker , __lowerCAmelCase : CLIPImageProcessor , __lowerCAmelCase : bool = True , ): super()._init_() _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase = StableDiffusionPipeline( vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , requires_safety_checker=__lowerCAmelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def lowerCAmelCase_ ( self : Dict ): return {k: getattr(self , __lowerCAmelCase ) for k in self.config.keys() if not k.startswith("""_""" )} def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): self.enable_attention_slicing(__lowerCAmelCase ) @torch.no_grad() def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Union[str, List[str]] , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : float = 7.5 , __lowerCAmelCase : Optional[Union[str, List[str]]] = None , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : Tuple , ): return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : Any , 
__lowerCAmelCase : Union[str, List[str]] , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : float = 7.5 , __lowerCAmelCase : Optional[Union[str, List[str]]] = None , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : Any , ): return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, List[str]] , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : float = 7.5 , __lowerCAmelCase : Optional[Union[str, List[str]]] = None , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : List[Any] , ): return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, List[str]] , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : float = 7.5 , __lowerCAmelCase : Optional[Union[str, List[str]]] = None , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : Union[str, Any] , ): return self.pipea( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Union[str, List[str]] , 
__lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : float = 7.5 , __lowerCAmelCase : Optional[Union[str, List[str]]] = None , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : str , ): _UpperCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(__lowerCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 _UpperCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 _UpperCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 _UpperCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 _UpperCAmelCase = self.textaimg_sda_a( prompt=__lowerCAmelCase , height=__lowerCAmelCase , width=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , output_type=__lowerCAmelCase , return_dict=__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=__lowerCAmelCase , **__lowerCAmelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
289
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
289
1
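The dummy-input plumbing in the tests above reduces to a single forward call; a standalone sketch with the small config from the first tester (class and argument names follow the public diffusers API, and the output is not checked against any reference):

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
)
noise = torch.randn(4, 3, 32, 32)        # (batch, channels, height, width)
timestep = torch.tensor([10])
sample = model(noise, timestep).sample   # prediction has the same shape as the input
assert sample.shape == noise.shape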
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCAmelCase__ = """src/diffusers""" # Matches is_xxx_available() UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") UpperCAmelCase__ = """ {0} = None """ UpperCAmelCase__ = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ UpperCAmelCase__ = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = _re_backend.findall(lowercase ) if len(lowercase ) == 0: return None return "_and_".join(lowercase ) def __UpperCAmelCase ( ): """simple docstring""" with open(os.path.join(lowercase ,"""__init__.py""" ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.readlines() # Get to the point we do the actual imports for type checking _UpperCAmelCase = 0 _UpperCAmelCase = {} # Go through the end of the file while line_index < len(lowercase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block _UpperCAmelCase = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 _UpperCAmelCase = [] # Until we unindent, add backend objects to the list while line_index < len(lowercase ) and len(lines[line_index] ) > 1: _UpperCAmelCase = lines[line_index] _UpperCAmelCase = _re_single_line_import.search(lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(lowercase ) > 0: _UpperCAmelCase = objects else: line_index += 1 return backend_specific_objects def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(lowercase ) elif name.islower(): return DUMMY_FUNCTION.format(lowercase ,lowercase ) else: return DUMMY_CLASS.format(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase=None ): """simple docstring""" if backend_specific_objects is None: _UpperCAmelCase = read_init() # For special correspondence backend to module name as used in the function requires_modulename _UpperCAmelCase = {} for backend, objects in backend_specific_objects.items(): _UpperCAmelCase = """[""" + """, """.join(f'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" _UpperCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(lowercase ,lowercase ) for o in objects] ) _UpperCAmelCase = dummy_file return dummy_files def __UpperCAmelCase ( lowercase=False ): """simple docstring""" _UpperCAmelCase = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py _UpperCAmelCase = {"""torch""": """pt"""} # Locate actual dummy modules and read their content. 
_UpperCAmelCase = os.path.join(lowercase ,"""utils""" ) _UpperCAmelCase = { backend: os.path.join(lowercase ,f'''dummy_{short_names.get(lowercase ,lowercase )}_objects.py''' ) for backend in dummy_files.keys() } _UpperCAmelCase = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(lowercase ): with open(lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.read() else: _UpperCAmelCase = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'''Updating diffusers.utils.dummy_{short_names.get(lowercase ,lowercase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ f'''diffusers.utils.dummy_{short_names.get(lowercase ,lowercase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase__ = parser.parse_args() check_dummies(args.fix_and_overwrite)
289
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
289
1
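For reference, rendering the DUMMY_CLASS template from the script above with an illustrative class name and a torch-only backend list yields a placeholder like this (the commented import is the header the script writes at the top of each dummy file):

# from ..utils import DummyObject, requires_backends

class StableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])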
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a ( lowerCAmelCase_ ): def __init__( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : int=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[int]=99 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Any=32 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Union[str, Any]=512 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : int="last" , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_lengths _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = gelu_activation _UpperCAmelCase = sinusoidal_embeddings _UpperCAmelCase = causal _UpperCAmelCase = asm _UpperCAmelCase = n_langs _UpperCAmelCase = vocab_size _UpperCAmelCase = n_special _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = summary_type _UpperCAmelCase = use_proj _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_input_lengths: _UpperCAmelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float() _UpperCAmelCase = 
ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCAmelCase_ ( self : Tuple ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , ): _UpperCAmelCase = FlaubertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , langs=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , ): _UpperCAmelCase = FlaubertWithLMHeadModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , ): _UpperCAmelCase = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , ): _UpperCAmelCase = FlaubertForQuestionAnswering(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , 
cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , ) _UpperCAmelCase = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , ) ((_UpperCAmelCase) , ) = result_with_labels.to_tuple() _UpperCAmelCase = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) ((_UpperCAmelCase) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , ): _UpperCAmelCase = FlaubertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = FlaubertForTokenClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = FlaubertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( 
_UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[str] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) _snake_case : str = ( { 'feature-extraction': FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = FlaubertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=37 ) def lowerCAmelCase_ ( self : int ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Union[str, Any] ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = FlaubertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow @require_torch_gpu def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return _UpperCAmelCase = True _UpperCAmelCase = model_class(config=__lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = torch.jit.trace( __lowerCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , """traced_model.pt""" ) ) _UpperCAmelCase = torch.jit.load(os.path.join(__lowerCAmelCase , """traced_model.pt""" ) , map_location=__lowerCAmelCase ) loaded(inputs_dict["""input_ids"""].to(__lowerCAmelCase ) , inputs_dict["""attention_mask"""].to(__lowerCAmelCase ) ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
289
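# Hedged sketch of the TorchScript round-trip the @require_torch_gpu test
# above performs, reduced to one pretrained model. The input tensor and the
# (1, 11, 768) output shape come from the slow integration test; passing
# torchscript=True through from_pretrained mirrors the config flag the test
# sets before tracing.
import torch
from transformers import FlaubertModel

model = FlaubertModel.from_pretrained(
    "flaubert/flaubert_base_cased", torchscript=True
).eval()
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
traced = torch.jit.trace(model, (input_ids, attention_mask))
torch.jit.save(traced, "traced_model.pt")
loaded = torch.jit.load("traced_model.pt")
output = loaded(input_ids, attention_mask)[0]  # last hidden state, (1, 11, 768)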
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
289
1
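# Hedged usage sketch for two of the utilities re-exported by the module
# above (`set_seed` from .random, `find_executable_batch_size` from .memory);
# the training body is a placeholder.
from accelerate.utils import find_executable_batch_size, set_seed

set_seed(42)  # seeds python, numpy and torch RNGs in one call

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Runs with batch_size=128 first; on CUDA OOM the decorator halves
    # batch_size and retries until the function succeeds.
    ...

# Call as train() with no argument; the decorator injects the current batch_size.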
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase__ = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase__ = { """camembert-base""": 5_1_2, } UpperCAmelCase__ = """▁""" class a ( lowerCAmelCase_ ): _snake_case : Any = VOCAB_FILES_NAMES _snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Tuple="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Optional[Any]="<unk>" , __lowerCAmelCase : Dict="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : str , ): # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) _UpperCAmelCase = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> _UpperCAmelCase = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} _UpperCAmelCase = len(self.fairseq_tokens_to_ids ) _UpperCAmelCase = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) _UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] _UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] 
_UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCAmelCase_ ( self : Tuple ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase_ ( self : int , __lowerCAmelCase : str ): return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(__lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : str ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] ): _UpperCAmelCase = [] _UpperCAmelCase = """""" _UpperCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCAmelCase ) + token _UpperCAmelCase = True _UpperCAmelCase = [] else: current_sub_tokens.append(__lowerCAmelCase ) _UpperCAmelCase = False out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def __getstate__( self : Tuple ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None return state def __setstate__( self : List[str] , __lowerCAmelCase : int ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , """wb""" ) as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
289
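# Illustrative restatement of the id mapping the CamemBERT tokenizer above
# implements: four reserved fairseq special ids come first, sentencepiece ids
# are shifted by that offset, and sentencepiece's unknown piece (id 0) is
# redirected to fairseq's <unk>. The standalone function name is hypothetical.
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def convert_token_to_id(sp_model, token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    if sp_model.PieceToId(token) == 0:  # unknown to sentencepiece
        return fairseq_tokens_to_ids["<unk>"]
    return fairseq_offset + sp_model.PieceToId(token)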
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
1
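# Equivalent request with explicit query parameters instead of the locals()
# shortcut used above; more robust if the function signature ever changes.
# The appid placeholder must hold a real OpenWeatherMap key.
import requests

response = requests.get(
    "https://api.openweathermap.org/data/2.5/weather",
    params={"q": "Chicago", "appid": "<your-appid>"},
)
print(response.json())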
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
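# Illustrative direct call of the conversion entry point wired up by the
# argument parser above, using the same positional order as the __main__
# block; every path below is a placeholder.
convert_tf_checkpoint_to_pytorch(
    "WTQ",                           # task
    True,                            # reset_position_index_per_cell
    "/path/to/tapas/model.ckpt",     # tf_checkpoint_path
    "/path/to/tapas_config.json",    # tapas_config_file
    "/path/to/pytorch_dump",         # pytorch_dump_path
)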
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
289
1
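# Worked reading of the failure array asserted in Test 5 above: failure[j]
# is the length of the longest proper prefix of pattern[: j + 1] that is
# also its suffix.
pattern = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
# e.g. failure[7] == 5 because "aabaabaa"[:5] == "aabaabaa"[-5:] == "aabaa",
# while failure[8] drops to 2 because only "aa" is both a proper prefix and
# a suffix of the full pattern.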
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = ReformerTokenizer _snake_case : Optional[Any] = ReformerTokenizerFast _snake_case : Optional[Any] = True _snake_case : Union[str, Any] = False _snake_case : str = True def lowerCAmelCase_ ( self : Optional[int] ): super().setUp() _UpperCAmelCase = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = """<s>""" _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(__lowerCAmelCase ) , 1000 ) def lowerCAmelCase_ ( self : Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCAmelCase_ ( self : Optional[int] ): if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = """I was born in 92000, and this is falsé.""" _UpperCAmelCase = tokenizer.tokenize(__lowerCAmelCase ) _UpperCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) _UpperCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(__lowerCAmelCase ) _UpperCAmelCase = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) # Simple input _UpperCAmelCase = """This is a simple input""" _UpperCAmelCase = ["""This is a simple input 1""", """This is a simple input 2"""] _UpperCAmelCase = ("""This is a simple input""", """This is a pair""") _UpperCAmelCase = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" ) # Simple input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" ) # Simple input self.assertRaises( __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , 
max_length=__lowerCAmelCase , padding="""max_length""" , ) # Pair input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" ) # Pair input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" ) # Pair input self.assertRaises( __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , ) def lowerCAmelCase_ ( self : str ): pass def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase ) _UpperCAmelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [285, 46, 10, 170, 382] , ) _UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def lowerCAmelCase_ ( self : Tuple ): return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" ) @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = """Hello World!""" _UpperCAmelCase = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) ) @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) _UpperCAmelCase = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) ) @require_torch @slow def lowerCAmelCase_ ( self : str ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence _UpperCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10] _UpperCAmelCase = """ """.join(__lowerCAmelCase ) _UpperCAmelCase = self.big_tokenizer.encode_plus(__lowerCAmelCase , return_tensors="""pt""" ) _UpperCAmelCase = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" ) _UpperCAmelCase = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) _UpperCAmelCase = encoded_sequence["""input_ids"""].shape _UpperCAmelCase = ReformerModel(__lowerCAmelCase ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__lowerCAmelCase ) model(**__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Tuple ): # fmt: off _UpperCAmelCase = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 _UpperCAmelCase = [ """This is a very simple sentence.""", """The quick brown fox jumps over the lazy dog.""", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=__lowerCAmelCase , sequences=__lowerCAmelCase , )
289
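# Condensed form of the slow round-trip exercised above: the pretrained
# sentencepiece tokenizer must reproduce the hard-coded ids for
# "Hello World!" from the integration test.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
assert tok.encode("Hello World!") == [126, 32, 262, 152, 38, 72, 287]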
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
289
1
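# Direct computation of Example 1 from the docstring above, spelling out
# Recall = TP / (TP + FN) with TP = 2 and FN = 1 for those inputs.
references  = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
tp = sum(p == r == 1 for p, r in zip(predictions, references))        # 2
fn = sum(r == 1 and p == 0 for p, r in zip(predictions, references))  # 1
assert tp / (tp + fn) == 2 / 3  # 0.6666666666666666, matching the example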
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class a ( unittest.TestCase ): _snake_case : Tuple = JukeboxTokenizer _snake_case : Any = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def lowerCAmelCase_ ( self : Dict ): import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _UpperCAmelCase = tokenizer(**self.metas )["""input_ids"""] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def lowerCAmelCase_ ( self : Optional[int] ): import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _UpperCAmelCase = tokenizer(**self.metas )["""input_ids"""] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 
77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
289
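# Minimal sketch of the tokenizer call the tests above exercise: Jukebox
# conditions on artist, genres and lyrics and returns one id tensor per prior
# level, as in EXPECTED_OUTPUT. The lyrics string is a placeholder, and the
# default tensor return type is assumed from the test's use of torch.allclose.
from transformers import JukeboxTokenizer

tok = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
tokens = tok(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
assert len(tokens) == 3  # one tensor per level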
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
289
1
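The Pegasus cache test above hinges on two small patterns: building decoder position ids with jnp.broadcast_to, and comparing full versus incremental logits by their maximum absolute difference. A minimal sketch of just those two steps, with made-up shapes and no actual model involved:

import jax.numpy as jnp
import numpy as np

batch_size, seq_len = 2, 7
# One row of positions 0..seq_len-2, repeated for every batch element.
decoder_position_ids = jnp.broadcast_to(
    jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
)
print(decoder_position_ids.shape)  # (2, 6)

logits_full = np.random.rand(batch_size, seq_len, 5)
logits_cached = logits_full + 1e-6  # stand-in for the incremental-decoding output
diff = np.max(np.abs(logits_cached[:, -1, :5] - logits_full[:, -1, :5]))
assert diff < 1e-3, f"Max diff is {diff}"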
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase ) _UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
289
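The CLI record above registers each subcommand on a shared subparser and dispatches through args.func. A self-contained sketch of that pattern; the greet command and its flag are invented for the demo:

from argparse import ArgumentParser


def greet_command_parser(subparsers):
    # Each registrar attaches one subcommand and stores its handler on `func`.
    parser = subparsers.add_parser("greet", help="Print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))


def main():
    parser = ArgumentParser("demo CLI", usage="demo <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="demo command helpers")
    greet_command_parser(subparsers)

    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand given: show usage instead of crashing on args.func.
        parser.print_help()
        exit(1)
    args.func(args)


if __name__ == "__main__":
    main()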
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
1
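As a sanity check for the segmented sieve above, comparing it against a plain one-shot sieve on a small bound should produce an identical list. This sketch assumes the sieve function from the record above is in scope; the naive version is written inline:

import math


def naive_sieve(n):
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(math.sqrt(n)) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i in range(n + 1) if is_prime[i]]


assert sieve(10**4) == naive_sieve(10**4)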
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = len(lowercase ) print("""The following activities are selected:""" ) # The first activity is always selected _UpperCAmelCase = 0 print(lowercase ,end=""",""" ) # Consider rest of the activities for j in range(lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(lowercase ,end=""",""" ) _UpperCAmelCase = j if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ = [1, 3, 0, 5, 8, 5] UpperCAmelCase__ = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
289
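The routine above prints its answer, which is awkward to test. A small variant under the same greedy rule (take an activity whenever its start time is at or after the finish time of the last one taken) that returns the chosen indices instead:

def max_activities(start, finish):
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


print(max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]))  # [0, 1, 3, 4]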
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
1
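One subtle line in the TAPAS script above derives the vocab path by slicing ten characters, the length of "model.ckpt", off the end of the checkpoint path. A quick check of that string arithmetic; the path here is hypothetical:

tf_checkpoint_path = "/tmp/tapas_sqa/model.ckpt"  # hypothetical checkpoint path
assert tf_checkpoint_path[-10:] == "model.ckpt"
vocab_file = tf_checkpoint_path[:-10] + "vocab.txt"
print(vocab_file)  # /tmp/tapas_sqa/vocab.txt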
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class a : def __init__( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Any=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : List[Any]=512 , __lowerCAmelCase : Dict=16 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[Any] ): return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , 
use_stable_embedding=__lowerCAmelCase , ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = OpenLlamaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , ): _UpperCAmelCase = True _UpperCAmelCase = OpenLlamaModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , ) _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , ) _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , ): _UpperCAmelCase = OpenLlamaForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , ): _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = OpenLlamaForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() # first forward pass _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , ) _UpperCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["""hidden_states"""][0] _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , 
encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["""hidden_states"""][0] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _snake_case : Dict = (OpenLlamaForCausalLM,) if is_torch_available() else () _snake_case : Tuple = ( { 'feature-extraction': OpenLlamaModel, 'text-classification': OpenLlamaForSequenceClassification, 'text-generation': OpenLlamaForCausalLM, 'zero-shot': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Optional[int] = False _snake_case : str = False def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = OpenLlamaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = input_dict["""input_ids"""] _UpperCAmelCase = input_ids.ne(1 ).to(__lowerCAmelCase ) _UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase = OpenLlamaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = """single_label_classification""" _UpperCAmelCase = input_dict["""input_ids"""] _UpperCAmelCase = input_ids.ne(1 ).to(__lowerCAmelCase ) _UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase = OpenLlamaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) 
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = """multi_label_classification""" _UpperCAmelCase = input_dict["""input_ids"""] _UpperCAmelCase = input_ids.ne(1 ).to(__lowerCAmelCase ) _UpperCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _UpperCAmelCase = OpenLlamaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def lowerCAmelCase_ ( self : Any ): pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ids_tensor([1, 10] , config.vocab_size ) _UpperCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase = OpenLlamaModel(__lowerCAmelCase ) original_model.to(__lowerCAmelCase ) original_model.eval() _UpperCAmelCase = original_model(__lowerCAmelCase ).last_hidden_state _UpperCAmelCase = original_model(__lowerCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase = {"""type""": scaling_type, """factor""": 10.0} _UpperCAmelCase = OpenLlamaModel(__lowerCAmelCase ) scaled_model.to(__lowerCAmelCase ) scaled_model.eval() _UpperCAmelCase = scaled_model(__lowerCAmelCase ).last_hidden_state _UpperCAmelCase = scaled_model(__lowerCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-5 ) )
289
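The sequence-classification tests above build the attention mask straight from the pad token via input_ids.ne(1). A standalone look at that trick; pad id 1 is simply the value those tests assume:

import torch

pad_token_id = 1
input_ids = torch.tensor([[5, 7, 9, 1, 1], [4, 1, 1, 1, 1]])
# 1 where the token is real, 0 where it is padding.
attention_mask = input_ids.ne(pad_token_id).long()
print(attention_mask)
# tensor([[1, 1, 1, 0, 0],
#         [1, 0, 0, 0, 0]])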
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
289
1
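The speed measurement above reduces to timeit.repeat with min-of-runs, the approach the timeit docs recommend over averaging. A stripped-down version of that pattern on a dummy workload:

import timeit


def fn():
    sum(i * i for i in range(10_000))


# Three timed runs of 10 calls each; take the best run, not the mean,
# since the minimum is least contaminated by system noise.
runtimes = timeit.repeat(fn, repeat=3, number=10)
best_seconds_per_call = min(runtimes) / 10.0
print(f"{best_seconds_per_call:.6f}s per call")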
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" if not isinstance(lowercase ,lowercase ): raise ValueError("""check_bouncy() accepts only integer arguments""" ) _UpperCAmelCase = str(lowercase ) _UpperCAmelCase = """""".join(sorted(lowercase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def __UpperCAmelCase ( lowercase = 99 ): """simple docstring""" if not 0 < percent < 1_00: raise ValueError("""solution() only accepts values from 0 to 100""" ) _UpperCAmelCase = 0 _UpperCAmelCase = 1 while True: if check_bouncy(lowercase ): bouncy_num += 1 if (bouncy_num / num) * 1_00 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(F'''{solution(9_9)}''')
289
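A few spot checks for the bouncy predicate above (a number is bouncy when its digits are neither monotonically increasing nor decreasing); these assume check_bouncy from the record above:

assert not check_bouncy(134468)  # increasing digits: not bouncy
assert not check_bouncy(66420)   # decreasing digits: not bouncy
assert check_bouncy(155349)      # rises then falls: bouncy
assert check_bouncy(101)         # falls then rises: bouncy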
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
1
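A worked check for the counter above: 100 has exactly three representations as a sum of distinct squares (10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2), so solve(100, 2) should return 3. Assumes solve from the record above:

assert solve(100, 2) == 3
assert solve(13, 2) == 1  # 2^2 + 3^2 is the only way
print(solve(100, 2))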
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
289
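The TrOCR init above defers heavy imports until an attribute is first touched. A minimal stand-in for that idea using importlib; the real _LazyModule in transformers does more bookkeeping (module specs, TYPE_CHECKING handling), so this is only a sketch of the mechanism:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called for attributes not yet set on the module.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per name
        return value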
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
289
1
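The heart of the weight port above is a layout change: TensorFlow stores conv kernels as (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W), hence permute(3, 2, 0, 1); depthwise kernels are (H, W, C, multiplier) and use (2, 3, 0, 1). A quick shape check:

import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)  # H, W, C_in, C_out
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)

tf_dw_kernel = np.random.rand(3, 3, 16, 1).astype(np.float32)  # H, W, C, multiplier
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert pt_dw_kernel.shape == (16, 1, 3, 3)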
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0 for i in range(len(lowercase ) )] # initialize interval's left pointer and right pointer _UpperCAmelCase , _UpperCAmelCase = 0, 0 for i in range(1 ,len(lowercase ) ): # case when current index is inside the interval if i <= right_pointer: _UpperCAmelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] ) _UpperCAmelCase = min_edge while go_next(lowercase ,lowercase ,lowercase ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: _UpperCAmelCase , _UpperCAmelCase = i, i + z_result[i] - 1 return z_result def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" return i + z_result[i] < len(lowercase ) and s[z_result[i]] == s[i + z_result[i]] def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string _UpperCAmelCase = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(lowercase ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
289
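Sample outputs for the Z-function above; z_result[i] is the length of the longest common prefix of the string and its suffix starting at i, with index 0 left as 0 by this implementation. Assumes the functions from the record above:

print(z_function("abacaba"))           # [0, 0, 1, 0, 3, 0, 1]
print(find_pattern("aba", "abacaba"))  # 2: matches at positions 0 and 4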
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
1
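A tiny walk on the chain above: two states with asymmetric transition probabilities, so over many steps "a" should be visited far more often than "b". Assumes the names from the record above; exact counts vary run to run because transition draws from random():

transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
counts = get_transitions("a", transitions, 5000)
print(counts.most_common())  # e.g. [('a', ...), ('b', ...)] with 'a' dominant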
"""simple docstring""" from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class a : def __init__( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any]=13 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=99 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[Any]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : str=4 , __lowerCAmelCase : List[Any]=None , ): _UpperCAmelCase = parent _UpperCAmelCase = 13 _UpperCAmelCase = 7 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 99 _UpperCAmelCase = 32 _UpperCAmelCase = 2 _UpperCAmelCase = 4 _UpperCAmelCase = 37 _UpperCAmelCase = """gelu""" _UpperCAmelCase = 0.1 _UpperCAmelCase = 0.1 _UpperCAmelCase = 512 _UpperCAmelCase = 16 _UpperCAmelCase = 2 _UpperCAmelCase = 0.02 _UpperCAmelCase = 3 _UpperCAmelCase = 4 _UpperCAmelCase = None def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , 
__lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = TFRoFormerModel(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = True _UpperCAmelCase = TFRoFormerForCausalLM(config=__lowerCAmelCase ) _UpperCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase )["""logits"""] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): _UpperCAmelCase = TFRoFormerForMaskedLM(config=__lowerCAmelCase ) _UpperCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFRoFormerForSequenceClassification(config=__lowerCAmelCase ) _UpperCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFRoFormerForMultipleChoice(config=__lowerCAmelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = 
self.num_labels _UpperCAmelCase = TFRoFormerForTokenClassification(config=__lowerCAmelCase ) _UpperCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase ) _UpperCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) _snake_case : int = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) _snake_case : Tuple = False _snake_case : Optional[Any] = False def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ): if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = TFRoFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Tuple ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] # TODO Replace vocab size _UpperCAmelCase = 5_0000 _UpperCAmelCase = [1, 6, vocab_size] self.assertEqual(output.shape , __lowerCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. _UpperCAmelCase = tf.constant( [ [ [-0.12_053_341, -1.0_264_901, 0.29_221_946], [-1.5_133_783, 0.197_433, 0.15_190_607], [-5.0_135_403, -3.900_256, -0.84_038_764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) @require_tf class a ( unittest.TestCase ): _snake_case : str = 1e-4 def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = tf.constant([[4, 10]] ) _UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _UpperCAmelCase = emba(input_ids.shape ) _UpperCAmelCase = tf.constant( [[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] ) tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = tf.constant( [ [0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000], [0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617], [0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870], ] ) _UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _UpperCAmelCase = emba.weight[:3, :5] tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance ) @require_tf class a ( unittest.TestCase ): _snake_case : int = 1e-4 def lowerCAmelCase_ ( self : Any ): # 2,12,16,64 _UpperCAmelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _UpperCAmelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _UpperCAmelCase = embed_positions([2, 16, 768] )[None, None, :, :] _UpperCAmelCase , _UpperCAmelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = tf.constant( [ [0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700], [-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343], [-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985], [-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871], [0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980], [3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253], ] ) _UpperCAmelCase = tf.constant( [ [0.0_000, -0.0_100, 
-0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700], [0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343], [1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985], [2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871], [-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980], [-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
289
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
289
1
"""simple docstring""" import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = s.rsplit(lowercase ,lowercase ) return new.join(lowercase ) def __UpperCAmelCase ( lowercase ): """simple docstring""" # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: _UpperCAmelCase = key.replace(f'''{group_key}.''' ,f'''{group_key}.group.''' ) if "res_path" in key: _UpperCAmelCase = key.replace("""res_path.""" ,"""res_path.path.""" ) if key.endswith(""".w""" ): _UpperCAmelCase = rreplace(lowercase ,""".w""" ,""".weight""" ,1 ) if key.endswith(""".b""" ): _UpperCAmelCase = rreplace(lowercase ,""".b""" ,""".bias""" ,1 ) _UpperCAmelCase = value.float() return upgrade @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=None ,lowercase=True ): """simple docstring""" from dall_e import Encoder _UpperCAmelCase = Encoder() if os.path.exists(lowercase ): _UpperCAmelCase = torch.load(lowercase ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(lowercase ) if isinstance(lowercase ,lowercase ): _UpperCAmelCase = ckpt.state_dict() encoder.load_state_dict(lowercase ) if config_path is not None: _UpperCAmelCase = FlavaImageCodebookConfig.from_pretrained(lowercase ) else: _UpperCAmelCase = FlavaImageCodebookConfig() _UpperCAmelCase = FlavaImageCodebook(lowercase ).eval() _UpperCAmelCase = encoder.state_dict() _UpperCAmelCase = upgrade_state_dict(lowercase ) hf_model.load_state_dict(lowercase ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(lowercase ) _UpperCAmelCase = count_parameters(lowercase ) assert torch.allclose(lowercase ,lowercase ,atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(lowercase ) else: return hf_state_dict if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") UpperCAmelCase__ = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
289
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
289
1
"""simple docstring""" # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib UpperCAmelCase__ = get_logger() UpperCAmelCase__ = None class a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): def __init__( self : Any , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : Optional[int] ): super().__init__(features=__lowerCAmelCase ) import jax from jaxlib.xla_client import Device if isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Expected {device} to be a `str` not {type(__lowerCAmelCase )}, as `jaxlib.xla_extension.Device` ''' """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """ """the device with `str()` to get its string identifier that will be internally mapped """ """to the actual `jaxlib.xla_extension.Device`.""" ) _UpperCAmelCase = device if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _UpperCAmelCase = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) _UpperCAmelCase = str(jax.devices()[0] ) _UpperCAmelCase = jnp_array_kwargs @staticmethod def lowerCAmelCase_ ( ): import jax return {str(__lowerCAmelCase ): device for device in jax.devices()} def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): import jax import jax.numpy as jnp if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and column: if all( isinstance(__lowerCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(__lowerCAmelCase , axis=0 ) return column def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Tuple ): import jax import jax.numpy as jnp if isinstance(__lowerCAmelCase , (str, bytes, type(__lowerCAmelCase )) ): return value elif isinstance(__lowerCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() _UpperCAmelCase = {} if isinstance(__lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: _UpperCAmelCase = {"""dtype""": jnp.intaa} else: _UpperCAmelCase = {"""dtype""": jnp.intaa} elif isinstance(__lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): _UpperCAmelCase = {"""dtype""": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__lowerCAmelCase , PIL.Image.Image ): _UpperCAmelCase = np.asarray(__lowerCAmelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global 
DEVICE_MAPPING if DEVICE_MAPPING is None: _UpperCAmelCase = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__lowerCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : str ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__lowerCAmelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(__lowerCAmelCase , """__array__""" ) and not isinstance(__lowerCAmelCase , jax.Array ): _UpperCAmelCase = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__lowerCAmelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] ) elif isinstance(__lowerCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] ) return self._tensorize(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : dict ): return map_nested(self._recursive_tensorize , __lowerCAmelCase , map_list=__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : pa.Table ): _UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__lowerCAmelCase ) _UpperCAmelCase = self.python_features_decoder.decode_row(__lowerCAmelCase ) return self.recursive_tensorize(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : pa.Table ): _UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__lowerCAmelCase ) _UpperCAmelCase = self.python_features_decoder.decode_column(__lowerCAmelCase , pa_table.column_names[0] ) _UpperCAmelCase = self.recursive_tensorize(__lowerCAmelCase ) _UpperCAmelCase = self._consolidate(__lowerCAmelCase ) return column def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : pa.Table ): _UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__lowerCAmelCase ) _UpperCAmelCase = self.python_features_decoder.decode_batch(__lowerCAmelCase ) _UpperCAmelCase = self.recursive_tensorize(__lowerCAmelCase ) for column_name in batch: _UpperCAmelCase = self._consolidate(batch[column_name] ) return batch
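# Usage sketch (added; assumes the public `datasets` API that routes through
# this formatter): requesting the "jax" format makes rows, columns, and
# batches come back as `jax.Array`s, optionally pinned to a named device.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]  # -> a jax array [1, 2]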
289
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
289
1
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase__ = """examples/""" UpperCAmelCase__ = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCAmelCase__ = { """init""": """src/diffusers/__init__.py""", """setup""": """setup.py""", } UpperCAmelCase__ = """README.md""" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" with open(lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.read() _UpperCAmelCase , _UpperCAmelCase = REPLACE_PATTERNS[pattern] _UpperCAmelCase = replace.replace("""VERSION""" ,lowercase ) _UpperCAmelCase = re_pattern.sub(lowercase ,lowercase ) with open(lowercase ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.write(lowercase ) def __UpperCAmelCase ( lowercase ): """simple docstring""" for folder, directories, fnames in os.walk(lowercase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(lowercase ,lowercase ) ,lowercase ,pattern="""examples""" ) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowercase ,lowercase ,lowercase ) if not patch: update_version_in_examples(lowercase ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """🤗 Transformers currently provides the following architectures""" _UpperCAmelCase = """1. Want to contribute a new model?""" with open(lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.readlines() # Find the start of the list. _UpperCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _UpperCAmelCase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): _UpperCAmelCase = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" ,"""https://huggingface.co/docs/diffusers/model_doc""" ,) index += 1 with open(lowercase ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(lowercase ) def __UpperCAmelCase ( ): """simple docstring""" with open(REPLACE_FILES["""init"""] ,"""r""" ) as f: _UpperCAmelCase = f.read() _UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(lowercase ).groups()[0] return packaging.version.parse(lowercase ) def __UpperCAmelCase ( lowercase=False ): """simple docstring""" _UpperCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: _UpperCAmelCase = default_version.base_version elif patch: _UpperCAmelCase = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: _UpperCAmelCase = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. _UpperCAmelCase = input(f'''Which version are you releasing? [{default_version}]''' ) if len(lowercase ) == 0: _UpperCAmelCase = default_version print(f'''Updating version to {version}.''' ) global_version_update(lowercase ,patch=lowercase ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = get_version() _UpperCAmelCase = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' _UpperCAmelCase = current_version.base_version # Check with the user we got that right. _UpperCAmelCase = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(lowercase ) == 0: _UpperCAmelCase = dev_version print(f'''Updating version to {version}.''' ) global_version_update(lowercase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCAmelCase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
289
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
289
1
"""simple docstring""" import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = CpmAntTokenizer _snake_case : List[str] = False def lowerCAmelCase_ ( self : List[str] ): super().setUp() _UpperCAmelCase = [ """<d>""", """</d>""", """<s>""", """</s>""", """</_>""", """<unk>""", """<pad>""", """</n>""", """我""", """是""", """C""", """P""", """M""", """A""", """n""", """t""", ] _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) @tooslow def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" ) _UpperCAmelCase = """今天天气真好!""" _UpperCAmelCase = ["""今天""", """天气""", """真""", """好""", """!"""] _UpperCAmelCase = tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = """今天天气真好!""" _UpperCAmelCase = [tokenizer.bos_token] + tokens _UpperCAmelCase = [6, 9802, 1_4962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) _UpperCAmelCase = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
289
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
289
1
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
289
1
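# A minimal hedged sketch of the shape check the tester above automates: build
# a tiny MPNetConfig, run a forward pass, and verify the hidden-state shape.
# The config values mirror the tester defaults; the rest is illustrative and
# not part of the original test file.
import torch
from transformers import MPNetConfig, MPNetModel

tiny_config = MPNetConfig(
    vocab_size=99, hidden_size=64, num_hidden_layers=5,
    num_attention_heads=4, intermediate_size=64,
)
model = MPNetModel(tiny_config).eval()
input_ids = torch.randint(0, tiny_config.vocab_size, (13, 7))  # (batch, seq)
with torch.no_grad():
    out = model(input_ids)
assert out.last_hidden_state.shape == (13, 7, tiny_config.hidden_size)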
"""simple docstring""" import baseaa def __UpperCAmelCase ( lowercase ): """simple docstring""" return baseaa.aaaencode(string.encode("""utf-8""" ) ) def __UpperCAmelCase ( lowercase ): """simple docstring""" return baseaa.aaadecode(lowercase ).decode("""utf-8""" ) if __name__ == "__main__": import doctest doctest.testmod()
289
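# Round-trip sanity check for the Ascii85 helpers above (pure standard library).
assert base85_decode(base85_encode("some text")) == "some text"
assert base85_encode("") == b""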
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
289
1
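# Spot checks for the converter above; the results follow directly from the
# exponent table (1 km = 10**3 m, 1 Mm = 10**6 m).
print(length_conversion(4, "meter", "kilometer"))  # 0.004
print(length_conversion(1, "megametre", "m"))      # 1000000.0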
"""simple docstring""" import os def __UpperCAmelCase ( ): """simple docstring""" with open(os.path.dirname(lowercase ) + """/p022_names.txt""" ) as file: _UpperCAmelCase = str(file.readlines()[0] ) _UpperCAmelCase = names.replace("""\"""" ,"""""" ).split(""",""" ) names.sort() _UpperCAmelCase = 0 _UpperCAmelCase = 0 for i, name in enumerate(lowercase ): for letter in name: name_score += ord(lowercase ) - 64 total_score += (i + 1) * name_score _UpperCAmelCase = 0 return total_score if __name__ == "__main__": print(solution())
289
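# Worked instance from the Project Euler 22 statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the sorted list it
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53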
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
289
1
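# The retry mechanism the training function above relies on, in isolation:
# `find_executable_batch_size` calls the wrapped function and, on CUDA
# out-of-memory errors, retries with the batch size halved. A hedged sketch:
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def run(batch_size):
    # The decorator injects `batch_size`; callers invoke `run()` with no args.
    print(f"attempting batch_size={batch_size}")


run()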
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
289
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
1
"""simple docstring""" import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") UpperCAmelCase__ = parser.parse_args() if args.model_type == "roberta": UpperCAmelCase__ = RobertaForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase__ = """roberta""" elif args.model_type == "gpt2": UpperCAmelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name) UpperCAmelCase__ = """transformer""" UpperCAmelCase__ = model.state_dict() UpperCAmelCase__ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: UpperCAmelCase__ = state_dict[F'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: UpperCAmelCase__ = F'''{prefix}.embeddings.{w}.weight''' UpperCAmelCase__ = state_dict[param_name] for w in ["weight", "bias"]: UpperCAmelCase__ = F'''{prefix}.embeddings.LayerNorm.{w}''' UpperCAmelCase__ = state_dict[param_name] # Transformer Blocks # UpperCAmelCase__ = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[ F'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] UpperCAmelCase__ = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: UpperCAmelCase__ = state_dict[F'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F'''lm_head.dense.{w}'''] UpperCAmelCase__ = state_dict[F'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F'''{prefix}.ln_f.{w}'''] UpperCAmelCase__ = state_dict["""lm_head.weight"""] print(F'''N layers selected for distillation: {std_idx}''') print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
289
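# The layer-selection rule used above, standalone: teacher layers
# [0, 2, 4, 7, 9, 11] are copied into consecutive student layers 0..5
# (`std_idx` increments once per selected teacher layer).
teacher_layers = [0, 2, 4, 7, 9, 11]
student_mapping = {t: s for s, t in enumerate(teacher_layers)}
print(student_mapping)  # {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}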
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
289
1
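# A hedged sketch of the first dummy setup above, assuming `diffusers` is
# installed (the tests reference the class as `UNetaDModel`; the public name
# is `UNet2DModel`). The tiny config values are the ones the tests use.
import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
    block_out_channels=(32, 64), attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
sample = torch.randn(4, 3, 32, 32)
out = model(sample, timestep=10).sample
assert out.shape == sample.shape  # the UNet preserves the sample shape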
"""simple docstring""" import heapq import sys import numpy as np UpperCAmelCase__ = tuple[int, int] class a : def __init__( self : Any ): _UpperCAmelCase = [] _UpperCAmelCase = set() def lowerCAmelCase_ ( self : List[str] ): if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): return len(self.elements ) == 0 def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__lowerCAmelCase ) else: # update # print("update", item) _UpperCAmelCase = [] ((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] ): if item in self.set: self.set.remove(__lowerCAmelCase ) _UpperCAmelCase = [] ((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def lowerCAmelCase_ ( self : Optional[Any] ): return self.elements[0][1] def lowerCAmelCase_ ( self : Dict ): ((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(__lowerCAmelCase ) return (priority, item) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # euclidean distance _UpperCAmelCase = np.array(lowercase ) _UpperCAmelCase = np.array(lowercase ) return np.linalg.norm(a - b ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # integer division by time variable return consistent_heuristic(lowercase ,lowercase ) // t def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = g_function[start] + Wa * heuristics[i](lowercase ,lowercase ) return ans def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = np.chararray((n, n) ) for i in range(lowercase ): for j in range(lowercase ): _UpperCAmelCase = """*""" for i in range(lowercase ): for j in range(lowercase ): if (j, (n - 1) - i) in blocks: _UpperCAmelCase = """#""" _UpperCAmelCase = """-""" _UpperCAmelCase = back_pointer[goal] while x != start: ((_UpperCAmelCase) , (_UpperCAmelCase)) = x # print(x) _UpperCAmelCase = """-""" _UpperCAmelCase = back_pointer[x] _UpperCAmelCase = """-""" for i in range(lowercase ): for j in range(lowercase ): if (i, j) == (0, n - 1): print(grid[i][j] ,end=""" """ ) print("""<-- End position""" ,end=""" """ ) else: print(grid[i][j] ,end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) _UpperCAmelCase = back_pointer[goal] while x != start: print(lowercase ,end=""" """ ) _UpperCAmelCase = back_pointer[x] print(lowercase ) sys.exit() def __UpperCAmelCase ( lowercase ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase 
,lowercase ,lowercase ,lowercase ,): """simple docstring""" for itera in range(lowercase ): open_list[itera].remove_element(lowercase ) # print("s", s) # print("j", j) ((_UpperCAmelCase) , (_UpperCAmelCase)) = s _UpperCAmelCase = (x - 1, y) _UpperCAmelCase = (x + 1, y) _UpperCAmelCase = (x, y + 1) _UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowercase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowercase ) _UpperCAmelCase = -1 _UpperCAmelCase = float("""inf""" ) if valid(lowercase ) and g_function[neighbours] > g_function[s] + 1: _UpperCAmelCase = g_function[s] + 1 _UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(lowercase ,key(lowercase ,0 ,lowercase ,lowercase ) ) if neighbours not in close_list_inad: for var in range(1 ,lowercase ): if key(lowercase ,lowercase ,lowercase ,lowercase ) <= Wa * key( lowercase ,0 ,lowercase ,lowercase ): open_list[j].put( lowercase ,key(lowercase ,lowercase ,lowercase ,lowercase ) ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = [] for x in range(1 ,5 ): for y in range(1 ,6 ): some_list.append((x, y) ) for x in range(15 ,20 ): some_list.append((x, 17) ) for x in range(10 ,19 ): for y in range(1 ,15 ): some_list.append((x, y) ) # L block for x in range(1 ,4 ): for y in range(12 ,19 ): some_list.append((x, y) ) for x in range(3 ,13 ): for y in range(16 ,19 ): some_list.append((x, y) ) return some_list UpperCAmelCase__ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCAmelCase__ = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (1_0, 1), (1_1, 1), (1_2, 1), (1_3, 1), (1_4, 1), (1_5, 1), (1_6, 1), (1_7, 1), (1_8, 1), (1_9, 1), ] UpperCAmelCase__ = make_common_ground() UpperCAmelCase__ = blocks_blk # hyper parameters UpperCAmelCase__ = 1 UpperCAmelCase__ = 1 UpperCAmelCase__ = 2_0 UpperCAmelCase__ = 3 # one consistent and two other inconsistent # start and end destination UpperCAmelCase__ = (0, 0) UpperCAmelCase__ = (n - 1, n - 1) UpperCAmelCase__ = 1 def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = {start: 0, goal: float("""inf""" )} _UpperCAmelCase = {start: -1, goal: -1} _UpperCAmelCase = [] _UpperCAmelCase = set() for i in range(lowercase ): open_list.append(PriorityQueue() ) open_list[i].put(lowercase ,key(lowercase ,lowercase ,lowercase ,lowercase ) ) _UpperCAmelCase = [] _UpperCAmelCase = [] while open_list[0].minkey() < float("""inf""" ): for i in range(1 ,lowercase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowercase ,lowercase ,lowercase ) else: _UpperCAmelCase , _UpperCAmelCase = open_list[i].top_show() visited.add(lowercase ) expand_state( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,) close_list_inad.append(lowercase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowercase ,lowercase ,lowercase ) else: _UpperCAmelCase = open_list[0].top_show() visited.add(lowercase ) expand_state( lowercase ,0 ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,) close_list_anchor.append(lowercase ) print("""No path found to goal""" ) print() for i in range(n - 1 ,-1 ,-1 ): for j in range(lowercase ): if (j, i) in blocks: 
print("""#""" ,end=""" """ ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("""*""" ,end=""" """ ) else: print("""-""" ,end=""" """ ) else: print("""*""" ,end=""" """ ) if (j, i) == (n - 1, n - 1): print("""<-- End position""" ,end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
289
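# The heuristics above, evaluated standalone on the start/goal pair the
# script uses (start=(0, 0), goal=(19, 19)): Euclidean distance for the
# consistent heuristic, Manhattan distance for heuristic_2.
import numpy as np

start_p, goal_p = (0, 0), (19, 19)
euclidean = np.linalg.norm(np.array(start_p) - np.array(goal_p))  # 19*sqrt(2) ~ 26.87
manhattan = abs(start_p[0] - goal_p[0]) + abs(start_p[1] - goal_p[1])  # 38
print(euclidean, manhattan)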
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
289
1
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def __UpperCAmelCase ( lowercase ): """simple docstring""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def __UpperCAmelCase ( lowercase ): """simple docstring""" # word like '180' or '身高' or '神' for char in word: _UpperCAmelCase = ord(lowercase ) if not _is_chinese_char(lowercase ): return 0 return 1 def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = set() for token in tokens: _UpperCAmelCase = len(lowercase ) > 1 and is_chinese(lowercase ) if chinese_word: word_set.add(lowercase ) _UpperCAmelCase = list(lowercase ) return word_list def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not chinese_word_set: return bert_tokens _UpperCAmelCase = max([len(lowercase ) for w in chinese_word_set] ) _UpperCAmelCase = bert_tokens _UpperCAmelCase , _UpperCAmelCase = 0, len(lowercase ) while start < end: _UpperCAmelCase = True if is_chinese(bert_word[start] ): _UpperCAmelCase = min(end - start ,lowercase ) for i in range(lowercase ,1 ,-1 ): _UpperCAmelCase = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): _UpperCAmelCase = """##""" + bert_word[j] _UpperCAmelCase = start + i _UpperCAmelCase = False break if single_word: start += 1 return bert_word def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [] for i in range(0 ,len(lowercase ) ,1_00 ): _UpperCAmelCase = ltp_tokenizer.pipeline(lines[i : i + 1_00] ,tasks=["""cws"""] ).cws _UpperCAmelCase = [get_chinese_word(lowercase ) for r in res] ltp_res.extend(lowercase ) assert len(lowercase ) == len(lowercase ) _UpperCAmelCase = [] for i in range(0 ,len(lowercase ) ,1_00 ): _UpperCAmelCase = bert_tokenizer(lines[i : i + 1_00] ,add_special_tokens=lowercase ,truncation=lowercase ,max_length=5_12 ) bert_res.extend(res["""input_ids"""] ) assert len(lowercase ) == len(lowercase ) _UpperCAmelCase = [] for input_ids, chinese_word in zip(lowercase ,lowercase ): _UpperCAmelCase = [] for id in input_ids: _UpperCAmelCase = bert_tokenizer._convert_id_to_token(lowercase ) input_tokens.append(lowercase ) _UpperCAmelCase = add_sub_symbol(lowercase ,lowercase ) _UpperCAmelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowercase ): if token[:2] == "##": _UpperCAmelCase = token[2:] # save chinese tokens' pos if len(lowercase ) == 1 and _is_chinese_char(ord(lowercase ) ): ref_id.append(lowercase ) ref_ids.append(lowercase ) assert len(lowercase ) == len(lowercase ) return ref_ids def __UpperCAmelCase ( lowercase ): """simple docstring""" # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name ,"""r""" ,encoding="""utf-8""" ) as f: _UpperCAmelCase = f.readlines() _UpperCAmelCase = [line.strip() for line in data if len(lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _UpperCAmelCase = LTP(args.ltp ) # faster in GPU device _UpperCAmelCase = BertTokenizer.from_pretrained(args.bert ) _UpperCAmelCase = prepare_ref(lowercase ,lowercase ,lowercase ) with open(args.save_path ,"""w""" ,encoding="""utf-8""" ) as f: _UpperCAmelCase = [json.dumps(lowercase ) + """\n""" for ref in ref_ids] f.writelines(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", required=False, type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", required=False, type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""", ) parser.add_argument( """--bert""", required=False, type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""", ) parser.add_argument( """--save_path""", required=False, type=str, default="""./resources/ref.txt""", help="""path to save res""", ) UpperCAmelCase__ = parser.parse_args() main(args)
289
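# The CJK range test at the heart of the script above, standalone: the main
# CJK Unified Ideographs block spans U+4E00..U+9FFF, so Chinese characters
# pass and Latin ones do not.
assert 0x4E00 <= ord("中") <= 0x9FFF
assert not 0x4E00 <= ord("a") <= 0x9FFF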
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
289
1
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = len(lowercase ) + 1 _UpperCAmelCase = len(lowercase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. _UpperCAmelCase = [[0 for i in range(lowercase )] for j in range(lowercase )] # since string of zero length match pattern of zero length _UpperCAmelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 ,lowercase ): _UpperCAmelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 ,lowercase ): _UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == """*""" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 ,lowercase ): for j in range(1 ,lowercase ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": _UpperCAmelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: _UpperCAmelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): _UpperCAmelCase = dp[i - 1][j] else: _UpperCAmelCase = 0 else: _UpperCAmelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") UpperCAmelCase__ = """aab""" UpperCAmelCase__ = """c*a*b""" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(F'''{input_string} matches the given pattern {pattern}''') else: print(F'''{input_string} does not match with the given pattern {pattern}''')
289
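# A couple of extra spot checks for the matcher above: "c*" can match the
# empty string, and "a*" absorbs the run of a's.
assert match_pattern("aab", "c*a*b")
assert not match_pattern("aab", "c*a")
assert match_pattern("", ".*")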
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
289
1
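# Hedged usage sketch: each helper issues a single GET request, so a valid
# OpenWeatherMap appid must be set first (APPID above, or passed explicitly;
# the key below is a hypothetical placeholder).
data = current_weather("Kolkata, India", appid="YOUR_APPID")
print(data.get("weather"))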
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = 0 while num > 0: _UpperCAmelCase = num % 8 _UpperCAmelCase = octal + (remainder * math.floor(math.pow(10 ,lowercase ) )) counter += 1 _UpperCAmelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return f'''0o{int(lowercase )}''' def __UpperCAmelCase ( ): """simple docstring""" print("""\n2 in octal is:""" ) print(decimal_to_octal(2 ) ) # = 2 print("""\n8 in octal is:""" ) print(decimal_to_octal(8 ) ) # = 10 print("""\n65 in octal is:""" ) print(decimal_to_octal(65 ) ) # = 101 print("""\n216 in octal is:""" ) print(decimal_to_octal(2_16 ) ) # = 330 print("""\n512 in octal is:""" ) print(decimal_to_octal(5_12 ) ) # = 1000 print("""\n""" ) if __name__ == "__main__": main()
289
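# Cross-check against the built-in formatter: both yield the '0o'-prefixed form.
assert decimal_to_octal(216) == oct(216)  # '0o330'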
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
289
1
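# One more check of the prefix table: for "aba" the table is [0, 0, 1], which
# is what lets the search find "aba" inside "cabab" without rescanning text.
assert get_failure_array("aba") == [0, 0, 1]
assert kmp("aba", "cabab")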
"""simple docstring""" class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any ): _UpperCAmelCase = val _UpperCAmelCase = None _UpperCAmelCase = None def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str ): if self.val: if val < self.val: if self.left is None: _UpperCAmelCase = Node(__lowerCAmelCase ) else: self.left.insert(__lowerCAmelCase ) elif val > self.val: if self.right is None: _UpperCAmelCase = Node(__lowerCAmelCase ) else: self.right.insert(__lowerCAmelCase ) else: _UpperCAmelCase = val def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # Recursive traversal if root: inorder(root.left ,lowercase ) res.append(root.val ) inorder(root.right ,lowercase ) def __UpperCAmelCase ( lowercase ): """simple docstring""" # Build BST if len(lowercase ) == 0: return arr _UpperCAmelCase = Node(arr[0] ) for i in range(1 ,len(lowercase ) ): root.insert(arr[i] ) # Traverse BST in order. _UpperCAmelCase = [] inorder(lowercase ,lowercase ) return res if __name__ == "__main__": print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
289
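# Note the equal-key branch of `insert` above overwrites `self.val`, so
# duplicates collapse: this sorter effectively sorts the set of keys.
print(tree_sort([3, 1, 3, 2]))  # [1, 2, 3] -- the duplicate 3 is dropped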
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
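# The metric above is a thin wrapper around scikit-learn, so the number from
# Example 1 can be reproduced directly (a sketch): 2 of the 3 positives are found.
from sklearn.metrics import recall_score

assert abs(recall_score([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]) - 2 / 3) < 1e-9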
289
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class a : _snake_case : Any = LEDConfig _snake_case : List[Any] = {} _snake_case : Optional[int] = 'gelu' def __init__( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Optional[int]=37 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[Any]=20 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : List[str]=4 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id _UpperCAmelCase = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _UpperCAmelCase = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _UpperCAmelCase = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _UpperCAmelCase = prepare_led_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = tf.concat( [tf.zeros_like(__lowerCAmelCase )[:, :-1], tf.ones_like(__lowerCAmelCase )[:, -1:]] , axis=-1 , ) _UpperCAmelCase = global_attention_mask return config, inputs_dict def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = TFLEDModel(config=__lowerCAmelCase ).get_decoder() _UpperCAmelCase = inputs_dict["""input_ids"""] _UpperCAmelCase = input_ids[:1, :] _UpperCAmelCase = inputs_dict["""attention_mask"""][:1, :] _UpperCAmelCase = 1 # first forward pass _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) _UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] _UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = tf.cast(tf.math.not_equal(lowercase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: _UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if 
head_mask is None: _UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Tuple = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _snake_case : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _snake_case : Dict = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _snake_case : int = True _snake_case : Optional[Any] = False _snake_case : Dict = False _snake_case : List[Any] = False def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = TFLEDModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = tf.zeros_like(inputs_dict["""attention_mask"""] ) _UpperCAmelCase = 2 _UpperCAmelCase = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , ) _UpperCAmelCase = True _UpperCAmelCase = self.model_tester.seq_length _UpperCAmelCase = self.model_tester.encoder_seq_length def check_decoder_attentions_output(__lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = outputs.decoder_attentions self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(__lowerCAmelCase : str ): _UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions] _UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) _UpperCAmelCase = len(__lowerCAmelCase ) self.assertEqual(config.output_hidden_states , __lowerCAmelCase ) check_encoder_attentions_output(__lowerCAmelCase ) if self.is_encoder_decoder: _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model(self._prepare_for_class(__lowerCAmelCase , 
__lowerCAmelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCAmelCase ) check_decoder_attentions_output(__lowerCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCAmelCase ) check_encoder_attentions_output(__lowerCAmelCase ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , __lowerCAmelCase ) check_encoder_attentions_output(__lowerCAmelCase ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def lowerCAmelCase_ ( self : Tuple ): pass def lowerCAmelCase_ ( self : Union[str, Any] ): # TODO: Head-masking not yet implement pass def __UpperCAmelCase ( lowercase ): """simple docstring""" return tf.constant(lowercase ,dtype=tf.intaa ) UpperCAmelCase__ = 1E-4 @slow @require_tf class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here _UpperCAmelCase = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCAmelCase = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCAmelCase = prepare_led_inputs_dict(model.config , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(**__lowerCAmelCase )[0] _UpperCAmelCase = (1, 1024, 768) self.assertEqual(output.shape , __lowerCAmelCase ) # change to expected output here _UpperCAmelCase = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-3 ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here _UpperCAmelCase = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCAmelCase = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCAmelCase = prepare_led_inputs_dict(model.config , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(**__lowerCAmelCase )[0] _UpperCAmelCase = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , __lowerCAmelCase ) # change to expected output here _UpperCAmelCase = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-3 , rtol=1e-3 )
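# Standalone sketch of the encoder-length computation used in the LED tester
# above: inputs are padded so their length is a multiple of the local attention
# window (the function name here is illustrative, not part of the test file).
def padded_length(seq_length: int, attention_window: int) -> int:
    return seq_length + (attention_window - seq_length % attention_window) % attention_window


assert padded_length(7, 4) == 8  # 7 tokens are padded up to the next multiple of 4
assert padded_length(8, 4) == 8  # already aligned, so no padding is added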
289
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,): """simple docstring""" if attention_mask is None: _UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () _snake_case : Optional[Any] = True _snake_case : List[str] = False _snake_case : Dict = False _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _UpperCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest("""JIT Enabled""" ): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): 
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _UpperCAmelCase = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] _UpperCAmelCase = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase ) _UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) assert tgt_text == decoded
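# Minimal NumPy sketch of the attention-mask construction performed by
# prepare_pegasus_inputs_dict above (pad_token_id=0 is an assumption here):
# positions holding the pad token are masked out with 0.
import numpy as np

_input_ids = np.array([[5, 7, 9, 0, 0]])
_attention_mask = np.not_equal(_input_ids, 0).astype(np.int8)
print(_attention_mask)  # [[1 1 1 0 0]]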
289
1
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def __UpperCAmelCase ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowercase ): requests.request("""GET""" ,"""https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 ) @pytest.mark.integration def __UpperCAmelCase ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" ,"""https://huggingface.co""" ) def __UpperCAmelCase ( ): """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowercase ): http_head("""https://huggingface.co""" )
289
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
1
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = credit_card_number _UpperCAmelCase = 0 _UpperCAmelCase = len(lowercase ) - 2 for i in range(lowercase ,-1 ,-2 ): # double the value of every second digit _UpperCAmelCase = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 _UpperCAmelCase = cc_number[:i] + str(lowercase ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(lowercase ) - 1 ,-1 ,-2 ): total += int(cc_number[i] ) return total % 10 == 0 def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = f'''{credit_card_number} is an invalid credit card number because''' if not credit_card_number.isdigit(): print(f'''{error_message} it has nonnumerical characters.''' ) return False if not 13 <= len(lowercase ) <= 16: print(f'''{error_message} of its length.''' ) return False if not validate_initial_digits(lowercase ): print(f'''{error_message} of its first two digits.''' ) return False if not luhn_validation(lowercase ): print(f'''{error_message} it fails the Luhn check.''' ) return False print(f'''{credit_card_number} is a valid credit card number.''' ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("""4111111111111111""") validate_credit_card_number("""32323""")
289
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _UpperCAmelCase = TapasConfig.from_json_file(lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_46_94 _UpperCAmelCase = 0.20_79_51 _UpperCAmelCase = 0.12_11_94 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.0_35_25_13 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.45_19 _UpperCAmelCase = 0.90_34_21 _UpperCAmelCase = 2_22.0_88 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_31_41 _UpperCAmelCase = TapasForQuestionAnswering(config=lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=lowercase ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 ) tokenizer.save_pretrained(lowercase ) print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
289
1
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss UpperCAmelCase__ = pytest.mark.integration @require_faiss class a ( lowerCAmelCase_ ): def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(__lowerCAmelCase ) for x in np.arange(30 ).tolist()]} ) return dset def lowerCAmelCase_ ( self : Optional[Any] ): import faiss _UpperCAmelCase = self._create_dummy_dataset() _UpperCAmelCase = dset.map( lambda __lowerCAmelCase , __lowerCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ) _UpperCAmelCase = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) _UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) dset.drop_index("""vecs""" ) def lowerCAmelCase_ ( self : Tuple ): import faiss _UpperCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) _UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) def lowerCAmelCase_ ( self : Any ): import faiss _UpperCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file: dset.save_faiss_index("""vecs""" , tmp_file.name ) dset.load_faiss_index("""vecs2""" , tmp_file.name ) os.unlink(tmp_file.name ) _UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" ) dset.drop_index("""vecs""" ) self.assertRaises(__lowerCAmelCase , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) ) def lowerCAmelCase_ ( self : Optional[int] ): from elasticsearch import Elasticsearch _UpperCAmelCase = self._create_dummy_dataset() with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch( """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk: _UpperCAmelCase = {"""acknowledged""": True} mocked_bulk.return_value([(True, None)] * 30 ) _UpperCAmelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}} _UpperCAmelCase = Elasticsearch() dset.add_elasticsearch_index("""filename""" , es_client=__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("""filename""" , """my_name-train_29""" ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) @require_faiss class a ( lowerCAmelCase_ ): def lowerCAmelCase_ ( self : List[Any] ): import faiss _UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query _UpperCAmelCase = np.zeros(5 , dtype=np.floataa ) _UpperCAmelCase = 1 _UpperCAmelCase , _UpperCAmelCase = index.search(__lowerCAmelCase ) self.assertRaises(__lowerCAmelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries _UpperCAmelCase = np.eye(5 , dtype=np.floataa )[::-1] _UpperCAmelCase , _UpperCAmelCase = index.search_batch(__lowerCAmelCase ) self.assertRaises(__lowerCAmelCase , index.search_batch , queries[0] ) _UpperCAmelCase = [scores[0] for scores in total_scores] _UpperCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCAmelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): import faiss _UpperCAmelCase = FaissIndex(string_factory="""Flat""" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) _UpperCAmelCase = FaissIndex(string_factory="""LSH""" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__lowerCAmelCase ): _UpperCAmelCase = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) ) def lowerCAmelCase_ ( self : List[str] ): import faiss _UpperCAmelCase = faiss.IndexFlat(5 ) _UpperCAmelCase = 
FaissIndex(custom_index=__lowerCAmelCase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def lowerCAmelCase_ ( self : List[Any] ): import faiss _UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file: index.save(tmp_file.name ) _UpperCAmelCase = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) _UpperCAmelCase = np.zeros(5 , dtype=np.floataa ) _UpperCAmelCase = 1 _UpperCAmelCase , _UpperCAmelCase = index.search(__lowerCAmelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def __UpperCAmelCase ( lowercase ): """simple docstring""" import faiss _UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 ,dtype=np.floataa ) ) _UpperCAmelCase = """index.faiss""" _UpperCAmelCase = f'''mock://{index_name}''' index.save(lowercase ,storage_options=mockfs.storage_options ) _UpperCAmelCase = FaissIndex.load(lowercase ,storage_options=mockfs.storage_options ) _UpperCAmelCase = np.zeros(5 ,dtype=np.floataa ) _UpperCAmelCase = 1 _UpperCAmelCase , _UpperCAmelCase = index.search(lowercase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class a ( lowerCAmelCase_ ): def lowerCAmelCase_ ( self : List[Any] ): from elasticsearch import Elasticsearch with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch( """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk: _UpperCAmelCase = Elasticsearch() _UpperCAmelCase = {"""acknowledged""": True} _UpperCAmelCase = ElasticSearchIndex(es_client=__lowerCAmelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["""foo""", """bar""", """foobar"""] ) # single query _UpperCAmelCase = """foo""" _UpperCAmelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}} _UpperCAmelCase , _UpperCAmelCase = index.search(__lowerCAmelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout _UpperCAmelCase = """foo""" _UpperCAmelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}} _UpperCAmelCase , _UpperCAmelCase = index.search(__lowerCAmelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries _UpperCAmelCase = ["""foo""", """bar""", """foobar"""] _UpperCAmelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}} _UpperCAmelCase , _UpperCAmelCase = index.search_batch(__lowerCAmelCase ) _UpperCAmelCase = [scores[0] for scores in total_scores] _UpperCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCAmelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCAmelCase ) # batched queries with timeout _UpperCAmelCase = ["""foo""", """bar""", """foobar"""] _UpperCAmelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}} _UpperCAmelCase , _UpperCAmelCase = index.search_batch(__lowerCAmelCase , request_timeout=30 ) _UpperCAmelCase = 
[scores[0] for scores in total_scores] _UpperCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCAmelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCAmelCase )
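# Minimal FAISS usage mirroring the tests above (a sketch; assumes faiss-cpu is
# installed, and remember that queries must be 2-D float32 arrays).
import faiss
import numpy as np

_index = faiss.IndexFlatIP(5)  # flat index with the inner-product metric
_index.add(np.eye(5, dtype=np.float32))  # add five one-hot vectors
_scores, _ids = _index.search(np.ones((1, 5), dtype=np.float32), 1)
assert _scores[0][0] == 1.0  # the best inner product with any one-hot vector is 1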
289
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase ,**lowercase ): return func(*lowercase ,**lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = random.Random() _UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class a ( lowerCAmelCase_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def lowerCAmelCase_ ( self : Union[str, Any] ): return tf.__version__ def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_inference ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_speed(_train ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_inference ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase ) _UpperCAmelCase = 
self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return self._measure_memory(_train ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _UpperCAmelCase = ( hasattr(__lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , __lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model_cls(__lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase ) # encoder-decoder has vocab size saved differently _UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0] _UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables ) return gradients _UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _UpperCAmelCase = timeit.repeat( __lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _UpperCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _UpperCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase ) _UpperCAmelCase = meminfo.used _UpperCAmelCase = Memory(__lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _UpperCAmelCase = None else: _UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase ) _UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase ) if memory is None: _UpperCAmelCase = summary.total else: _UpperCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
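# The speed measurement in the benchmark above reduces to this pure-Python
# pattern (a sketch): repeat the callable, take the minimum as the timeit docs
# recommend, and normalise by the number of calls per repeat.
import timeit


def _inference():
    sum(range(1000))  # stand-in for a model forward pass


print(min(timeit.repeat(_inference, repeat=3, number=10)) / 10.0)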
289
1
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = list(range(len(lowercase ) ) ) _UpperCAmelCase = [v / w for v, w in zip(lowercase ,lowercase )] index.sort(key=lambda lowercase : ratio[i] ,reverse=lowercase ) _UpperCAmelCase = 0 _UpperCAmelCase = [0] * len(lowercase ) for i in index: if weight[i] <= capacity: _UpperCAmelCase = 1 max_value += value[i] capacity -= weight[i] else: _UpperCAmelCase = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
289
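For readability, a de-obfuscated sketch of the same greedy fractional-knapsack routine (variable names are mine): sort item indices by value/weight ratio, take whole items while they fit, then a fraction of the next.

def fractional_knapsack(value, weight, capacity):
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value = 0.0
    fractions = [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions

# fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# -> (240.0, [1.0, 1.0, 0.666...]): both light items whole, 2/3 of the last.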
"""simple docstring""" from math import pow def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _UpperCAmelCase = int(pow(lowercase ,lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _UpperCAmelCase , _UpperCAmelCase = backtrack( lowercase ,lowercase ,current_number + 1 ,lowercase ,lowercase ) return current_sum, solutions_count def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(lowercase ,lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
289
1
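The recursion above counts the ways to write `needed_sum` as a sum of distinct natural numbers each raised to `power`. A clean reimplementation of the same include/exclude backtracking (names are mine), with two hand-checkable cases:

def count_power_sums(needed_sum, power, current=1, current_sum=0):
    if current_sum == needed_sum:
        return 1
    term = current ** power
    count = 0
    if current_sum + term <= needed_sum:
        # include current**power and move to the next base
        count += count_power_sums(needed_sum, power, current + 1, current_sum + term)
    if term < needed_sum:
        # skip current**power and try the next base
        count += count_power_sums(needed_sum, power, current + 1, current_sum)
    return count

assert count_power_sums(13, 2) == 1   # 2**2 + 3**2
assert count_power_sums(100, 2) == 3  # 10**2; 6**2 + 8**2; 1 + 9 + 16 + 25 + 49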
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = coefficient_matrix.shape _UpperCAmelCase , _UpperCAmelCase = constant_matrix.shape if rowsa != colsa: _UpperCAmelCase = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(lowercase ) if colsa != 1: _UpperCAmelCase = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(lowercase ) if rowsa != rowsa: _UpperCAmelCase = ( """Coefficient and constant matrices dimensions must be nxn and nx1 but """ f'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(lowercase ) if len(lowercase ) != rowsa: _UpperCAmelCase = ( """Number of initial values must be equal to number of rows in coefficient """ f'''matrix but received {len(lowercase )} and {rowsa}''' ) raise ValueError(lowercase ) if iterations <= 0: raise ValueError("""Iterations must be at least 1""" ) _UpperCAmelCase = np.concatenate( (coefficient_matrix, constant_matrix) ,axis=1 ) _UpperCAmelCase , _UpperCAmelCase = table.shape strictly_diagonally_dominant(lowercase ) # Iterates the whole matrix for given number of times for _ in range(lowercase ): _UpperCAmelCase = [] for row in range(lowercase ): _UpperCAmelCase = 0 for col in range(lowercase ): if col == row: _UpperCAmelCase = table[row][col] elif col == cols - 1: _UpperCAmelCase = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] _UpperCAmelCase = (temp + val) / denom new_val.append(lowercase ) _UpperCAmelCase = new_val return [float(lowercase ) for i in new_val] def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = table.shape _UpperCAmelCase = True for i in range(0 ,lowercase ): _UpperCAmelCase = 0 for j in range(0 ,cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
289
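The row/column loops above implement the classic Jacobi update x_new = D^{-1}(b - R x). A compact NumPy sketch of the same iteration on a small strictly diagonally dominant system (matrix and iteration count chosen for illustration):

import numpy as np

def jacobi(a, b, x0, iterations):
    d = np.diag(a)          # diagonal entries
    r = a - np.diagflat(d)  # off-diagonal remainder
    x = x0.astype(float)
    for _ in range(iterations):
        x = (b - r @ x) / d  # simultaneous update of all components
    return x

a = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([6.0, 8.0, 7.0])
print(jacobi(a, b, np.zeros(3), 50))  # converges toward the solution of a @ x = b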
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } UpperCAmelCase__ = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = EfficientNetConfig() _UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = 10_00 _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = EfficientNetImageProcessor( size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 
0.47_43_41_63] ,do_center_crop=lowercase ,) return preprocessor def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase = sorted(set(lowercase ) ) _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )} _UpperCAmelCase = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) 
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase = """efficientnet.""" + item[1] _UpperCAmelCase = """classifier.weight""" _UpperCAmelCase = """classifier.bias""" return key_mapping def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: _UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: _UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) ) else: _UpperCAmelCase = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = model_classes[model_name]( include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,) _UpperCAmelCase = original_model.trainable_variables _UpperCAmelCase = original_model.non_trainable_variables _UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase = param.numpy() _UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase = get_efficientnet_config(lowercase ) _UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval() _UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase = rename_keys(lowercase ) replace_params(lowercase ,lowercase ,lowercase ) # Initialize preprocessor and preprocess input image _UpperCAmelCase = convert_image_processor(lowercase ) _UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase = hf_model(**lowercase ) _UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase = False _UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) _UpperCAmelCase = image.img_to_array(lowercase ) _UpperCAmelCase = np.expand_dims(lowercase ,axis=0 ) _UpperCAmelCase = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
289
1
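The heart of the conversion script is reordering kernel axes: Keras stores conv kernels as (H, W, in, out) and depthwise kernels as (H, W, in, multiplier), while PyTorch expects (out, in, H, W); the model map presumably targets tf.keras.applications.efficientnet.EfficientNetB0 through EfficientNetB7. A minimal sketch of the permutation step, with illustrative shapes:

import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 32, 64))                         # Keras conv kernel: H, W, in, out
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> out, in, H, W
assert pt_weight.shape == (64, 32, 3, 3)

tf_dw = np.zeros((3, 3, 32, 1))                              # Keras depthwise kernel: H, W, in, mult
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)          # -> in, mult, H, W
assert pt_dw.shape == (32, 1, 3, 3)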
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Union[str, Any] = 'maskformer-swin' _snake_case : int = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Dict , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : str=96 , __lowerCAmelCase : List[str]=[2, 2, 6, 2] , __lowerCAmelCase : Tuple=[3, 6, 12, 24] , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[Any]=4.0 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Tuple=1e-5 , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : Optional[int] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(__lowerCAmelCase ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) ) _UpperCAmelCase = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__lowerCAmelCase ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
289
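The `hidden_size` computed at the end of the config follows the Swin design: channel width doubles at each of the `len(depths) - 1` downsampling stages. A quick sanity check with the defaults above (plain Python, no transformers import needed):

embed_dim = 96
depths = [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768: 96 -> 192 -> 384 -> 768 across the four stages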
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
1
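A short usage sketch for the weighted random walk above (the transition probabilities are made up; each node's outgoing probabilities should sum to 1). It reproduces the cumulative-probability transition without the graph class:

from collections import Counter
from random import random

transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]

probs = {}
for src, dst, p in transitions:
    probs.setdefault(src, {})[dst] = p

visits = Counter()
node = "a"
for _ in range(10_000):
    r, acc = random(), 0.0
    for dst, p in probs[node].items():
        acc += p
        if acc > r:   # pick the first destination whose cumulative mass exceeds r
            node = dst
            break
    visits[node] += 1

print(visits)  # roughly 5/6 of visits land on "a", the chain's stationary share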
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--model_ckpt""" ,type=lowercase ,default="""microsoft/unixcoder-base-nine""" ) parser.add_argument("""--num_epochs""" ,type=lowercase ,default=5 ) parser.add_argument("""--batch_size""" ,type=lowercase ,default=6 ) parser.add_argument("""--gradient_accumulation_steps""" ,type=lowercase ,default=1 ) parser.add_argument("""--freeze""" ,type=lowercase ,default=lowercase ) parser.add_argument("""--learning_rate""" ,type=lowercase ,default=5E-4 ) parser.add_argument("""--seed""" ,type=lowercase ,default=0 ) parser.add_argument("""--lr_scheduler_type""" ,type=lowercase ,default="""cosine""" ) parser.add_argument("""--num_warmup_steps""" ,type=lowercase ,default=10 ) parser.add_argument("""--weight_decay""" ,type=lowercase ,default=0.01 ) parser.add_argument("""--output_dir""" ,type=lowercase ,default="""./results""" ) return parser.parse_args() UpperCAmelCase__ = load("""accuracy""") def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = eval_pred _UpperCAmelCase = np.argmax(lowercase ,axis=1 ) return metric.compute(predictions=lowercase ,references=lowercase ) class a ( lowerCAmelCase_ ): def __init__( self : List[str] , __lowerCAmelCase : Tuple ): super().__init__() _UpperCAmelCase = trainer def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , **__lowerCAmelCase : Dict ): if control.should_evaluate: _UpperCAmelCase = deepcopy(__lowerCAmelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" ) return control_copy def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = get_args() set_seed(args.seed ) _UpperCAmelCase = load_dataset("""codeparrot/codecomplex""" ,split="""train""" ) _UpperCAmelCase = dataset.train_test_split(test_size=0.2 ) _UpperCAmelCase = train_test["""test"""].train_test_split(test_size=0.5 ) _UpperCAmelCase = DatasetDict( { """train""": train_test["""train"""], """test""": test_validation["""train"""], """valid""": test_validation["""test"""], } ) print("""Loading tokenizer and model""" ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) _UpperCAmelCase = tokenizer.eos_token _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt ,num_labels=7 ) _UpperCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _UpperCAmelCase = False _UpperCAmelCase = ClassLabel(num_classes=7 ,names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) ) def tokenize(lowercase ): _UpperCAmelCase = tokenizer(example["""src"""] ,truncation=lowercase ,max_length=10_24 ) _UpperCAmelCase = labels.straint(example["""complexity"""] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _UpperCAmelCase = train_test_validation.map( lowercase ,batched=lowercase ,remove_columns=train_test_validation["""train"""].column_names ,) _UpperCAmelCase = DataCollatorWithPadding(tokenizer=lowercase ) _UpperCAmelCase = TrainingArguments( output_dir=args.output_dir 
,learning_rate=args.learning_rate ,lr_scheduler_type=args.lr_scheduler_type ,evaluation_strategy="""epoch""" ,save_strategy="""epoch""" ,logging_strategy="""epoch""" ,per_device_train_batch_size=args.batch_size ,per_device_eval_batch_size=args.batch_size ,num_train_epochs=args.num_epochs ,gradient_accumulation_steps=args.gradient_accumulation_steps ,weight_decay=0.01 ,metric_for_best_model="""accuracy""" ,run_name="""complexity-java""" ,report_to="""wandb""" ,) _UpperCAmelCase = Trainer( model=lowercase ,args=lowercase ,train_dataset=tokenized_datasets["""train"""] ,eval_dataset=tokenized_datasets["""valid"""] ,tokenizer=lowercase ,data_collator=lowercase ,compute_metrics=lowercase ,) print("""Training...""" ) trainer.add_callback(CustomCallback(lowercase ) ) trainer.train() if __name__ == "__main__": main()
289
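The `labels.straint(...)` call in the tokenize closure above corresponds to `datasets.ClassLabel.str2int`, which maps a label string to its integer id (and `int2str` goes the other way). A minimal sketch of that API with illustrative class names:

from datasets import ClassLabel

labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
print(labels.str2int("linear"))  # 1
print(labels.int2str(2))         # "quadratic"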
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ): _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Dict ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
289
1
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = IFInpaintingPipeline _snake_case : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} _snake_case : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _snake_case : List[Any] = PipelineTesterMixin.required_optional_params - {'latents'} def lowerCAmelCase_ ( self : Union[str, Any] ): return self._get_dummy_components() def lowerCAmelCase_ ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any]=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCAmelCase_ ( self : List[Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def lowerCAmelCase_ ( self : int ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def lowerCAmelCase_ ( self : int ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def lowerCAmelCase_ ( self : Union[str, Any] ): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def lowerCAmelCase_ ( self : Tuple ): self._test_save_load_local() def lowerCAmelCase_ ( self : Dict ): self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
289
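The `get_dummy_inputs` helper seeds differently per device because `torch.Generator` long lacked MPS support, so MPS tests fall back to the global seed. The device-aware pattern in isolation (a sketch, not the test suite's exact helper):

import torch

def make_generator(device: str, seed: int = 0):
    # MPS generators were historically unsupported; seed the default generator instead.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=42)
noise = torch.randn(1, 3, 32, 32, generator=gen)  # reproducible dummy latents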
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'efficientnet' def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__lowerCAmelCase ) * 4 class a ( lowerCAmelCase_ ): _snake_case : Dict = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : Any ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : int ): return 1e-5
289
1
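The final line of the config body sets `num_hidden_layers = sum(num_block_repeats) * 4`, presumably counting four sub-layers per MBConv repeat (expansion, depthwise convolution, squeeze-and-excite, projection). With the defaults above:

num_block_repeats = [1, 2, 2, 3, 3, 4, 1]
print(sum(num_block_repeats) * 4)  # 64 hidden layers for the base repeat counts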
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9}, }, { 'framework': 'tensorflow', 'script': 'run_tf.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9}, }, ] ) class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Union[str, Any] ): if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__lowerCAmelCase , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Optional[Any]=1 ): # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=__lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCAmelCase , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] ): TrainingJobAnalytics(__lowerCAmelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) def lowerCAmelCase_ ( self : str ): # create estimator _UpperCAmelCase = self.create_estimator() # run training estimator.fit() # result dataframe _UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) _UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __lowerCAmelCase )
289
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = embedding_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_hidden_groups _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): _UpperCAmelCase = AlbertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , 
attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Dict = True def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AlbertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , 
hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
289
1
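The `ids_tensor` and `random_attention_mask` helpers used throughout these testers are thin wrappers over `torch.randint`. Equivalent dummy inputs can be built directly; a sketch with this tester's default shapes (pinning one position, as the upstream helper does, so no row is fully masked):

import torch

batch_size, seq_length, vocab_size = 13, 7, 99
input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))
attention_mask = torch.randint(0, 2, (batch_size, seq_length))
attention_mask[:, -1] = 1  # guarantee at least one attended token per row
print(input_ids.shape, attention_mask.shape)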
"""simple docstring""" import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __UpperCAmelCase ( lowercase=None ,lowercase=None ): """simple docstring""" return field(default_factory=lambda: default ,metadata=lowercase ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The csv file to plot.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) _snake_case : Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def __UpperCAmelCase ( lowercase ): """simple docstring""" try: int(lowercase ) return True except ValueError: return False def __UpperCAmelCase ( lowercase ): """simple docstring""" try: float(lowercase ) return True except ValueError: return False class a : def __init__( self : Dict , __lowerCAmelCase : Tuple ): _UpperCAmelCase = args _UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _UpperCAmelCase = csv.DictReader(__lowerCAmelCase ) for row in reader: _UpperCAmelCase = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _UpperCAmelCase = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _UpperCAmelCase = float(row["""result"""] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = plt.subplots() _UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage""" _UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _UpperCAmelCase = self.result_dict[model_name]["""result"""] ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _UpperCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _UpperCAmelCase = 
np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , ) else: _UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )] plt.scatter( __lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" ) title_str += f''' {label_model_name} vs.''' _UpperCAmelCase = title_str[:-4] _UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(__lowerCAmelCase ) plt.xlabel(__lowerCAmelCase ) plt.ylabel(__lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = HfArgumentParser(lowercase ) _UpperCAmelCase = parser.parse_args_into_dataclasses()[0] _UpperCAmelCase = Plot(args=lowercase ) plot.plot() if __name__ == "__main__": main()
289
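The plotting script's reader reduces to grouping csv rows by model and coercing `result` to a number when possible. A trimmed sketch of that pattern over an in-memory csv (column names mirror the script's):

import csv
import io
from collections import defaultdict

raw = "model,batch_size,sequence_length,result\nbert,8,128,0.5\nbert,8,256,0.9\n"
results = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

for row in csv.DictReader(io.StringIO(raw)):
    entry = results[row["model"]]
    bsz, seq_len = int(row["batch_size"]), int(row["sequence_length"])
    entry["bsz"].append(bsz)
    entry["seq_len"].append(seq_len)
    entry["result"][(bsz, seq_len)] = float(row["result"])

print(results["bert"]["result"])  # {(8, 128): 0.5, (8, 256): 0.9}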
"""simple docstring""" UpperCAmelCase__ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" # Return True if there is node that has not iterated. _UpperCAmelCase = [False] * len(lowercase ) _UpperCAmelCase = [s] _UpperCAmelCase = True while queue: _UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase ) _UpperCAmelCase = True _UpperCAmelCase = u return visited[t] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [-1] * (len(lowercase )) _UpperCAmelCase = 0 _UpperCAmelCase = [] _UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(lowercase ,lowercase ,lowercase ,lowercase ): _UpperCAmelCase = float("""Inf""" ) _UpperCAmelCase = sink while s != source: # Find the minimum value in select path _UpperCAmelCase = min(lowercase ,graph[parent[s]][s] ) _UpperCAmelCase = parent[s] max_flow += path_flow _UpperCAmelCase = sink while v != source: _UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCAmelCase = parent[v] for i in range(len(lowercase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
289
1
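The adjacency matrix at the top of that module is the standard six-node CLRS flow network, whose maximum flow is 23 (a minimum cut saturates the edges (1, 3), (4, 3) and (4, 5), with capacities 12 + 7 + 4). A clean-room Edmonds-Karp sketch, independent of the module's own `bfs`/`mincut`, to cross-check the value:

from collections import deque

def max_flow(cap, s, t):
    # Edmonds-Karp: repeatedly augment along the shortest residual path.
    n, flow = len(cap), 0
    cap = [row[:] for row in cap]  # work on a residual copy
    while True:
        parent = [-1] * n
        parent[s] = s
        q = deque([s])
        while q and parent[t] == -1:
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and cap[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[t] == -1:
            return flow  # no augmenting path left
        bottleneck, v = float("inf"), t
        while v != s:
            bottleneck = min(bottleneck, cap[parent[v]][v])
            v = parent[v]
        v = t
        while v != s:
            cap[parent[v]][v] -= bottleneck
            cap[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(max_flow(graph, 0, 5))  # 23 for the CLRS example network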
"""simple docstring""" from string import ascii_uppercase UpperCAmelCase__ = {char: i for i, char in enumerate(ascii_uppercase)} UpperCAmelCase__ = dict(enumerate(ascii_uppercase)) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = 0 while True: if x == i: _UpperCAmelCase = 0 if len(lowercase ) == len(lowercase ): break key += key[i] i += 1 return key def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = """""" _UpperCAmelCase = 0 for letter in message: if letter == " ": cipher_text += " " else: _UpperCAmelCase = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = """""" _UpperCAmelCase = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: _UpperCAmelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """THE GERMAN ATTACK""" _UpperCAmelCase = """SECRET""" _UpperCAmelCase = generate_key(lowercase ,lowercase ) _UpperCAmelCase = cipher_text(lowercase ,lowercase ) print(f'''Encrypted Text = {s}''' ) print(f'''Original Text = {original_text(lowercase ,lowercase )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCAmelCase__ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def lowerCAmelCase_ ( self : Union[str, Any] ): return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = MPNetModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): _UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : List[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Union[str, Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : int = False _snake_case : List[Any] = True def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = MPNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Dict ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase ) @require_torch class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Tuple = DiTPipeline _snake_case : int = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _snake_case : List[str] = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } _snake_case : Tuple = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _snake_case : int = False def lowerCAmelCase_ ( self : str ): torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowerCAmelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=__lowerCAmelCase , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = """cpu""" _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase ) _UpperCAmelCase = pipe(**__lowerCAmelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__lowerCAmelCase , 1e-3 ) def lowerCAmelCase_ ( self : Optional[int] ): self._test_inference_batch_single_identical(relax_max_difference=__lowerCAmelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCAmelCase_ ( self : Optional[int] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _UpperCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _UpperCAmelCase = 
pipe.get_label_ids(__lowerCAmelCase ) _UpperCAmelCase = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-2 def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _UpperCAmelCase = ["""vase""", """umbrella"""] _UpperCAmelCase = pipe.get_label_ids(__lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-1
"""simple docstring""" UpperCAmelCase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = from_type.lower().strip("""s""" ) _UpperCAmelCase = to_type.lower().strip("""s""" ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) _UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase ) if from_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) if to_sanitized not in METRIC_CONVERSION: _UpperCAmelCase = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(lowercase )}''' ) raise ValueError(lowercase ) _UpperCAmelCase = METRIC_CONVERSION[from_sanitized] _UpperCAmelCase = METRIC_CONVERSION[to_sanitized] _UpperCAmelCase = 1 if from_exponent > to_exponent: _UpperCAmelCase = from_exponent - to_exponent else: _UpperCAmelCase = -(to_exponent - from_exponent) return value * pow(10 ,lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import math def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = input("""Enter message: """ ) _UpperCAmelCase = int(input(f'''Enter key [2-{len(lowercase ) - 1}]: ''' ) ) _UpperCAmelCase = input("""Encryption/Decryption [e/d]: """ ) if mode.lower().startswith("""e""" ): _UpperCAmelCase = encrypt_message(lowercase ,lowercase ) elif mode.lower().startswith("""d""" ): _UpperCAmelCase = decrypt_message(lowercase ,lowercase ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f'''Output:\n{text + "|"}''' ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [""""""] * key for col in range(lowercase ): _UpperCAmelCase = col while pointer < len(lowercase ): cipher_text[col] += message[pointer] pointer += key return "".join(lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = math.ceil(len(lowercase ) / key ) _UpperCAmelCase = key _UpperCAmelCase = (num_cols * num_rows) - len(lowercase ) _UpperCAmelCase = [""""""] * num_cols _UpperCAmelCase = 0 _UpperCAmelCase = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): _UpperCAmelCase = 0 row += 1 return "".join(lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
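The decorator pattern used above, in isolation: the wrapped function takes the batch size as its first argument, is called with no arguments, and is retried with a halved batch size after every out-of-memory failure. A minimal sketch:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # build dataloaders and run the loop with `batch_size`;
    # on a CUDA OOM the decorator halves batch_size and calls train() again
    print(f"trying batch_size={batch_size}")

train()  # note: invoked without arguments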
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if initial_intensity < 0: raise ValueError("""The value of intensity cannot be negative""" ) # handling of negative values of initial intensity if angle < 0 or angle > 3_60: raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(lowercase ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="""malus_law""")
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
"""simple docstring""" from ..utils import DummyObject, requires_backends class a ( metaclass=lowerCAmelCase_ ): _snake_case : Dict = ['transformers', 'torch', 'note_seq'] def __init__( self : List[str] , *__lowerCAmelCase : str , **__lowerCAmelCase : Union[str, Any] ): requires_backends(self , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def lowerCAmelCase_ ( cls : str , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Dict ): requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = 
UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) _UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input 
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class a : _snake_case : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , ) _snake_case : int = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def __UpperCAmelCase ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) _UpperCAmelCase = import_module("""tasks""" ) try: _UpperCAmelCase = getattr(lowercase ,model_args.task_type ) _UpperCAmelCase = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" ,lowercase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _UpperCAmelCase = token_classification_task.get_labels(data_args.labels ) _UpperCAmelCase = dict(enumerate(lowercase ) ) _UpperCAmelCase = len(lowercase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowercase ,idalabel=lowercase ,labelaid={label: i for i, label in enumerate(lowercase )} ,cache_dir=model_args.cache_dir ,) _UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,) _UpperCAmelCase = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowercase ,cache_dir=model_args.cache_dir ,) # Get datasets _UpperCAmelCase = ( TokenClassificationDataset( token_classification_task=lowercase ,data_dir=data_args.data_dir ,tokenizer=lowercase ,labels=lowercase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,) if training_args.do_train else None ) _UpperCAmelCase = ( TokenClassificationDataset( token_classification_task=lowercase ,data_dir=data_args.data_dir ,tokenizer=lowercase ,labels=lowercase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,) if training_args.do_eval else None ) def align_predictions(lowercase ,lowercase ) -> Tuple[List[int], List[int]]: _UpperCAmelCase = np.argmax(lowercase ,axis=2 ) _UpperCAmelCase , _UpperCAmelCase = preds.shape _UpperCAmelCase = [[] for _ in range(lowercase )] _UpperCAmelCase = [[] for _ in range(lowercase )] for i in range(lowercase ): for j in range(lowercase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(lowercase ) -> Dict: 
_UpperCAmelCase , _UpperCAmelCase = align_predictions(p.predictions ,p.label_ids ) return { "accuracy_score": accuracy_score(lowercase ,lowercase ), "precision": precision_score(lowercase ,lowercase ), "recall": recall_score(lowercase ,lowercase ), "f1": fa_score(lowercase ,lowercase ), } # Data collator _UpperCAmelCase = DataCollatorWithPadding(lowercase ,pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _UpperCAmelCase = Trainer( model=lowercase ,args=lowercase ,train_dataset=lowercase ,eval_dataset=lowercase ,compute_metrics=lowercase ,data_collator=lowercase ,) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _UpperCAmelCase = trainer.evaluate() _UpperCAmelCase = os.path.join(training_args.output_dir ,"""eval_results.txt""" ) if trainer.is_world_process_zero(): with open(lowercase ,"""w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" ,lowercase ,lowercase ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowercase ) # Predict if training_args.do_predict: _UpperCAmelCase = TokenClassificationDataset( token_classification_task=lowercase ,data_dir=data_args.data_dir ,tokenizer=lowercase ,labels=lowercase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(lowercase ) _UpperCAmelCase , _UpperCAmelCase = align_predictions(lowercase ,lowercase ) _UpperCAmelCase = os.path.join(training_args.output_dir ,"""test_results.txt""" ) if trainer.is_world_process_zero(): with open(lowercase ,"""w""" ) as writer: for key, value in metrics.items(): logger.info(""" %s = %s""" ,lowercase ,lowercase ) writer.write("""%s = %s\n""" % (key, value) ) # Save predictions _UpperCAmelCase = os.path.join(training_args.output_dir ,"""test_predictions.txt""" ) if trainer.is_world_process_zero(): with open(lowercase ,"""w""" ) as writer: with open(os.path.join(data_args.data_dir ,"""test.txt""" ) ,"""r""" ) as f: token_classification_task.write_predictions_to_file(lowercase ,lowercase ,lowercase ) return results def __UpperCAmelCase ( lowercase ): """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
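A hedged invocation of this script; the filename run_ner.py and the data layout are assumptions, but the flag names match the dataclass fields and TrainingArguments usage above:

python run_ner.py \
  --model_name_or_path bert-base-cased \
  --data_dir ./conll2003 \
  --labels ./conll2003/labels.txt \
  --max_seq_length 128 \
  --output_dir ./ner-output \
  --do_train --do_eval --do_predict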
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = StableUnCLIPPipeline _snake_case : str = TEXT_TO_IMAGE_PARAMS _snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case : str = False def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 32 _UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase ) _UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ): if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets UpperCAmelCase__ = """\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ UpperCAmelCase__ = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ UpperCAmelCase__ = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : str ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[ """https://en.wikipedia.org/wiki/ROUGE_(metric)""", """https://github.com/google-research/google-research/tree/master/rouge""", ] , ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]=False ): if rouge_types is None: _UpperCAmelCase = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""] _UpperCAmelCase = rouge_scorer.RougeScorer(rouge_types=__lowerCAmelCase , use_stemmer=__lowerCAmelCase ) if use_aggregator: _UpperCAmelCase = scoring.BootstrapAggregator() else: _UpperCAmelCase = [] for ref, pred in zip(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = scorer.score(__lowerCAmelCase , __lowerCAmelCase ) if use_aggregator: aggregator.add_scores(__lowerCAmelCase ) else: scores.append(__lowerCAmelCase ) if use_aggregator: _UpperCAmelCase = aggregator.aggregate() else: _UpperCAmelCase = {} for key in scores[0]: _UpperCAmelCase = [score[key] for score in scores] return result
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
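# A hedged sketch of one utility re-exported above: find_executable_batch_size
# retries its wrapped function, halving `batch_size`, whenever an out-of-memory
# error is raised during the attempt. The training body here is a placeholder.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # ... build dataloaders and run the training loop with `batch_size` ...
    print(f"training with batch_size={batch_size}")


train()  # the decorator injects the current batch size on each attempt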
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = """""" else: _UpperCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ 
-config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ): """simple docstring""" # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. _UpperCAmelCase = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = ViTMSNConfig() _UpperCAmelCase = 10_00 _UpperCAmelCase = """datasets/huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: _UpperCAmelCase = 3_84 _UpperCAmelCase = 15_36 _UpperCAmelCase = 6 elif "l16" in checkpoint_url: _UpperCAmelCase = 10_24 _UpperCAmelCase = 40_96 _UpperCAmelCase = 24 _UpperCAmelCase = 16 _UpperCAmelCase = 0.1 elif "b4" in checkpoint_url: _UpperCAmelCase = 4 elif "l7" in checkpoint_url: _UpperCAmelCase = 7 _UpperCAmelCase = 10_24 _UpperCAmelCase = 40_96 _UpperCAmelCase = 24 _UpperCAmelCase = 16 _UpperCAmelCase = 0.1 _UpperCAmelCase = ViTMSNModel(lowercase ) _UpperCAmelCase = torch.hub.load_state_dict_from_url(lowercase ,map_location="""cpu""" )["""target_encoder"""] _UpperCAmelCase = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowercase ) _UpperCAmelCase = create_rename_keys(lowercase ,base_model=lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ,base_model=lowercase ) model.load_state_dict(lowercase ) model.eval() _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) _UpperCAmelCase = ViTImageProcessor( size=config.image_size ,image_mean=lowercase ,image_std=lowercase ) _UpperCAmelCase = image_processor(images=lowercase ,return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: _UpperCAmelCase = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] ) elif "b16" in checkpoint_url: _UpperCAmelCase = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] ) elif "l16" in checkpoint_url: _UpperCAmelCase = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] ) elif "b4" in checkpoint_url: _UpperCAmelCase = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] 
) else: _UpperCAmelCase = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] ,lowercase ,atol=1E-4 ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) UpperCAmelCase__ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
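# For reference, a hypothetical invocation of the conversion entry point
# convert_vit_msn_checkpoint defined above (the script's file name is an
# assumption; the checkpoint URL is the argparse default shown):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small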
"""simple docstring""" import requests UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here! UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/""" def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """weather""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """forecast""" ,params=locals() ).json() def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ): """simple docstring""" return requests.get(URL_BASE + """onecall""" ,params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: UpperCAmelCase__ = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class a ( lowerCAmelCase_ ): def __init__( self : List[str] ): # test for the above condition self.test() def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = 0 _UpperCAmelCase = False while not completed: if counter == 1: self.reset() _UpperCAmelCase = self.advance() if not self.does_advance(__lowerCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.update(__lowerCAmelCase ) counter += 1 if counter > 1_0000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def lowerCAmelCase_ ( self : List[str] ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCAmelCase_ ( self : List[str] ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCAmelCase_ ( self : Any ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Optional[Any]=False ): raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class a ( lowerCAmelCase_ ): def __init__( self : Any , __lowerCAmelCase : List[int] ): super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) _UpperCAmelCase = token_ids _UpperCAmelCase = len(self.token_ids ) _UpperCAmelCase = -1 # the index of the currently fulfilled step _UpperCAmelCase = False def lowerCAmelCase_ ( self : List[Any] ): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False if self.does_advance(__lowerCAmelCase ): self.fulfilled_idx += 1 _UpperCAmelCase = True if self.fulfilled_idx == (self.seqlen - 1): _UpperCAmelCase = True _UpperCAmelCase = completed else: # failed to make progress. _UpperCAmelCase = True self.reset() return stepped, completed, reset def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = False _UpperCAmelCase = 0 def lowerCAmelCase_ ( self : Any ): return self.seqlen - (self.fulfilled_idx + 1) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Tuple=False ): _UpperCAmelCase = PhrasalConstraint(self.token_ids ) if stateful: _UpperCAmelCase = self.seqlen _UpperCAmelCase = self.fulfilled_idx _UpperCAmelCase = self.completed return new_constraint class a : def __init__( self : List[str] , __lowerCAmelCase : List[List[int]] , __lowerCAmelCase : int=True ): _UpperCAmelCase = max([len(__lowerCAmelCase ) for one in nested_token_ids] ) _UpperCAmelCase = {} for token_ids in nested_token_ids: _UpperCAmelCase = root for tidx, token_id in enumerate(__lowerCAmelCase ): if token_id not in level: _UpperCAmelCase = {} _UpperCAmelCase = level[token_id] if no_subsets and self.has_subsets(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) _UpperCAmelCase = root def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict ): _UpperCAmelCase = self.trie for current_token in current_seq: _UpperCAmelCase = start[current_token] _UpperCAmelCase = list(start.keys() ) return next_tokens def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = self.next_tokens(__lowerCAmelCase ) return len(__lowerCAmelCase ) == 0 def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict ): _UpperCAmelCase = list(root.values() ) if len(__lowerCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(__lowerCAmelCase ) for nn in next_nodes] ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ): _UpperCAmelCase = 
self.count_leaves(__lowerCAmelCase ) return len(__lowerCAmelCase ) != leaf_count class a ( lowerCAmelCase_ ): def __init__( self : str , __lowerCAmelCase : List[List[int]] ): super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) _UpperCAmelCase = DisjunctiveTrie(__lowerCAmelCase ) _UpperCAmelCase = nested_token_ids _UpperCAmelCase = self.trie.max_height _UpperCAmelCase = [] _UpperCAmelCase = False def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.trie.next_tokens(self.current_seq ) if len(__lowerCAmelCase ) == 0: return None else: return token_list def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _UpperCAmelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False if self.does_advance(__lowerCAmelCase ): self.current_seq.append(__lowerCAmelCase ) _UpperCAmelCase = True else: _UpperCAmelCase = True self.reset() _UpperCAmelCase = self.trie.reached_leaf(self.current_seq ) _UpperCAmelCase = completed return stepped, completed, reset def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = False _UpperCAmelCase = [] def lowerCAmelCase_ ( self : int ): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Any=False ): _UpperCAmelCase = DisjunctiveConstraint(self.token_ids ) if stateful: _UpperCAmelCase = self.seqlen _UpperCAmelCase = self.current_seq _UpperCAmelCase = self.completed return new_constraint class a : def __init__( self : Optional[int] , __lowerCAmelCase : List[Constraint] ): _UpperCAmelCase = constraints # max # of steps required to fulfill a given constraint _UpperCAmelCase = max([c.seqlen for c in constraints] ) _UpperCAmelCase = len(__lowerCAmelCase ) _UpperCAmelCase = False self.init_state() def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = [] _UpperCAmelCase = None _UpperCAmelCase = [constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.constraints] def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = [] if self.inprogress_constraint is None: for constraint in 
self.pending_constraints: # "pending" == "unfulfilled yet" _UpperCAmelCase = constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) else: _UpperCAmelCase = self.inprogress_constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None else: return token_list def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[List[int]] ): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint _UpperCAmelCase , _UpperCAmelCase = self.add(__lowerCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) _UpperCAmelCase , _UpperCAmelCase = False, False if self.completed: _UpperCAmelCase = True _UpperCAmelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.inprogress_constraint.update(__lowerCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) ) _UpperCAmelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) _UpperCAmelCase = None if len(self.pending_constraints ) == 0: # we're done! _UpperCAmelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pending_constraint.update(__lowerCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(__lowerCAmelCase ) _UpperCAmelCase = None if not complete and stepped: _UpperCAmelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". _UpperCAmelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. _UpperCAmelCase = True break # prevent accidentally stepping through multiple constraints with just one token. 
return complete, stepped def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : List[Any]=True ): _UpperCAmelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: _UpperCAmelCase = [ constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: _UpperCAmelCase = self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) _UpperCAmelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
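# A minimal sketch of driving the constraint classes above by hand, assuming
# they carry their upstream transformers names (Constraint, PhrasalConstraint,
# DisjunctiveConstraint, ConstraintListState), which their copy() methods
# already reference. Token ids are arbitrary illustrative integers.
constraint = PhrasalConstraint([5, 9, 3])

for token_id in [5, 9, 3]:
    stepped, completed, reset = constraint.update(token_id)
    print(stepped, completed, reset)  # reset stays False while progress is made

assert constraint.remaining() == 0  # all three ids consumed, constraint fulfilled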
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_failure_array(lowercase ) # 2) Step through text searching for pattern _UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern while i < len(lowercase ): if pattern[j] == text[i]: if j == (len(lowercase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCAmelCase = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] _UpperCAmelCase = 0 _UpperCAmelCase = 1 while j < len(lowercase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCAmelCase = failure[i - 1] continue j += 1 failure.append(lowercase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase__ = """abc1abc12""" UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase__ = """ABABX""" UpperCAmelCase__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase__ = """AAAB""" UpperCAmelCase__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase__ = """abcdabcy""" UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets UpperCAmelCase__ = datasets.logging.get_logger(__name__) UpperCAmelCase__ = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ UpperCAmelCase__ = """\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information. """ UpperCAmelCase__ = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: 'scores': List of scores. Examples: >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> bleurt = datasets.load_metric(\"bleurt\") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results[\"scores\"]]) [1.03, 1.04] """ UpperCAmelCase__ = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] ): # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) _UpperCAmelCase = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: _UpperCAmelCase = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: _UpperCAmelCase = self.config_name.upper() else: raise KeyError( f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' ) # download the model checkpoint specified by self.config_name and set up the scorer _UpperCAmelCase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) _UpperCAmelCase = score.BleurtScorer(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ): _UpperCAmelCase = self.scorer.score(references=__lowerCAmelCase , candidates=__lowerCAmelCase ) return {"scores": scores}
"""simple docstring""" from sklearn.metrics import recall_score import datasets UpperCAmelCase__ = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ UpperCAmelCase__ = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ UpperCAmelCase__ = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ): _UpperCAmelCase = recall_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , ) return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
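# A sketch of the multilabel branch above: loading the metric with the
# "multilabel" config name switches the expected features to sequences of
# ints, after which per-label recall is averaged as requested.
import datasets

recall_metric = datasets.load_metric("recall", "multilabel")
results = recall_metric.compute(
    predictions=[[0, 1, 1], [1, 1, 0]],
    references=[[0, 1, 1], [0, 1, 0]],
    average="micro",
)
print(results)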
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml UpperCAmelCase__ = NewType("""DataClass""", Any) UpperCAmelCase__ = NewType("""DataClassType""", Any) def __UpperCAmelCase ( lowercase ): """simple docstring""" if isinstance(lowercase ,lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {str(lowercase ): choice for choice in choices} return lambda lowercase : str_to_choice.get(lowercase ,lowercase ) def __UpperCAmelCase ( *, lowercase = None ,lowercase = None ,lowercase = dataclasses.MISSING ,lowercase = dataclasses.MISSING ,lowercase = None ,**lowercase ,): """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _UpperCAmelCase = {} if aliases is not None: _UpperCAmelCase = aliases if help is not None: _UpperCAmelCase = help return dataclasses.field(metadata=lowercase ,default=lowercase ,default_factory=lowercase ,**lowercase ) class a ( lowerCAmelCase_ ): _snake_case : Iterable[DataClassType] def __init__( self : int , __lowerCAmelCase : Union[DataClassType, Iterable[DataClassType]] , **__lowerCAmelCase : str ): # To make the default appear when using --help if "formatter_class" not in kwargs: _UpperCAmelCase = ArgumentDefaultsHelpFormatter super().__init__(**__lowerCAmelCase ) if dataclasses.is_dataclass(__lowerCAmelCase ): _UpperCAmelCase = [dataclass_types] _UpperCAmelCase = list(__lowerCAmelCase ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__lowerCAmelCase ) @staticmethod def lowerCAmelCase_ ( __lowerCAmelCase : ArgumentParser , __lowerCAmelCase : dataclasses.Field ): _UpperCAmelCase = f'''--{field.name}''' _UpperCAmelCase = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __lowerCAmelCase ): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""" ) _UpperCAmelCase = kwargs.pop("""aliases""" , [] ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = [aliases] _UpperCAmelCase = getattr(field.type , """__origin__""" , field.type ) if origin_type is Union or (hasattr(__lowerCAmelCase , """UnionType""" ) and isinstance(__lowerCAmelCase , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__lowerCAmelCase ) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" f''' Problem encountered in field \'{field.name}\'.''' ) if type(__lowerCAmelCase ) not in field.type.__args__: # filter `str` in Union _UpperCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _UpperCAmelCase = getattr(field.type , """__origin__""" , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _UpperCAmelCase = ( field.type.__args__[0] if isinstance(__lowerCAmelCase , field.type.__args__[1] ) else field.type.__args__[1] ) _UpperCAmelCase = getattr(field.type , """__origin__""" , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _UpperCAmelCase = {} if origin_type is Literal or (isinstance(field.type , __lowerCAmelCase ) and issubclass(field.type , __lowerCAmelCase )): if origin_type is Literal: _UpperCAmelCase = field.type.__args__ else: _UpperCAmelCase = [x.value for x in field.type] _UpperCAmelCase = make_choice_type_function(kwargs["""choices"""] ) if field.default is not dataclasses.MISSING: _UpperCAmelCase = field.default else: _UpperCAmelCase = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _UpperCAmelCase = copy(__lowerCAmelCase ) # Hack because type=bool in argparse does not behave as we want. _UpperCAmelCase = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
_UpperCAmelCase = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _UpperCAmelCase = default # This tells argparse we accept 0 or 1 value after --field_name _UpperCAmelCase = """?""" # This is the value that will get picked if we do --field_name (without value) _UpperCAmelCase = True elif isclass(__lowerCAmelCase ) and issubclass(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = field.type.__args__[0] _UpperCAmelCase = """+""" if field.default_factory is not dataclasses.MISSING: _UpperCAmelCase = field.default_factory() elif field.default is dataclasses.MISSING: _UpperCAmelCase = True else: _UpperCAmelCase = field.type if field.default is not dataclasses.MISSING: _UpperCAmelCase = field.default elif field.default_factory is not dataclasses.MISSING: _UpperCAmelCase = field.default_factory() else: _UpperCAmelCase = True parser.add_argument(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): _UpperCAmelCase = False parser.add_argument(f'''--no_{field.name}''' , action="""store_false""" , dest=field.name , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : DataClassType ): if hasattr(__lowerCAmelCase , """_argument_group_name""" ): _UpperCAmelCase = self.add_argument_group(dtype._argument_group_name ) else: _UpperCAmelCase = self try: _UpperCAmelCase = get_type_hints(__lowerCAmelCase ) except NameError: raise RuntimeError( f'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowerCAmelCase ): _UpperCAmelCase = """.""".join(map(__lowerCAmelCase , sys.version_info[:3] ) ) raise RuntimeError( f'''Type resolution failed for {dtype} on Python {python_version}. Try removing ''' """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""" ) from ex raise for field in dataclasses.fields(__lowerCAmelCase ): if not field.init: continue _UpperCAmelCase = type_hints[field.name] self._parse_dataclass_field(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Any=False , __lowerCAmelCase : Dict=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=None , ): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _UpperCAmelCase = [] if args_filename: args_files.append(Path(__lowerCAmelCase ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _UpperCAmelCase = ArgumentParser() args_file_parser.add_argument(__lowerCAmelCase , type=__lowerCAmelCase , action="""append""" ) # Use only remaining args for further parsing (remove the args_file_flag) _UpperCAmelCase , _UpperCAmelCase = args_file_parser.parse_known_args(args=__lowerCAmelCase ) _UpperCAmelCase = vars(__lowerCAmelCase ).get(args_file_flag.lstrip("""-""" ) , __lowerCAmelCase ) if cmd_args_file_paths: args_files.extend([Path(__lowerCAmelCase ) for p in cmd_args_file_paths] ) _UpperCAmelCase = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _UpperCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:] _UpperCAmelCase , _UpperCAmelCase = self.parse_known_args(args=__lowerCAmelCase ) _UpperCAmelCase = [] for dtype in self.dataclass_types: _UpperCAmelCase = {f.name for f in dataclasses.fields(__lowerCAmelCase ) if f.init} _UpperCAmelCase = {k: v for k, v in vars(__lowerCAmelCase ).items() if k in keys} for k in keys: delattr(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dtype(**__lowerCAmelCase ) outputs.append(__lowerCAmelCase ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__lowerCAmelCase ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict[str, Any] , __lowerCAmelCase : bool = False ): _UpperCAmelCase = set(args.keys() ) _UpperCAmelCase = [] for dtype in self.dataclass_types: _UpperCAmelCase = {f.name for f in dataclasses.fields(__lowerCAmelCase ) if f.init} _UpperCAmelCase = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _UpperCAmelCase = dtype(**__lowerCAmelCase ) outputs.append(__lowerCAmelCase ) if not allow_extra_keys and unused_keys: raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(__lowerCAmelCase )}''' ) return tuple(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ): with open(Path(__lowerCAmelCase ) , encoding="""utf-8""" ) as open_json_file: _UpperCAmelCase = json.loads(open_json_file.read() ) _UpperCAmelCase = self.parse_dict(__lowerCAmelCase , allow_extra_keys=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ): _UpperCAmelCase = self.parse_dict(yaml.safe_load(Path(__lowerCAmelCase ).read_text() ) , allow_extra_keys=__lowerCAmelCase ) return tuple(__lowerCAmelCase )
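# A minimal sketch of driving the parser above, assuming it carries its
# upstream transformers name HfArgumentParser (the class and method names in
# the listing are mangled; parse_args_into_dataclasses matches the signature
# shown). TrainingArgs and its fields are illustrative.
from dataclasses import dataclass, field


@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-5, metadata={"help": "Initial learning rate."})
    do_train: bool = field(default=False, metadata={"help": "Whether to run training."})


parser = HfArgumentParser(TrainingArgs)
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--learning_rate", "1e-4", "--do_train"]
)
print(training_args.learning_rate, training_args.do_train)  # 0.0001 True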
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase__ = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _snake_case : Tuple = PegasusConfig _snake_case : int = {} _snake_case : str = 'gelu' def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(__lowerCAmelCase ) _UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _UpperCAmelCase = 
model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        past_key_values = _UpperCAmelCase  # the decoder cache initialised by the assignment above
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # extend the decoder mask to the full cache length; future positions are zeroed out
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        tgt_text = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
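# A minimal sketch of the cached decoding pattern the two `check_use_cache_*`
# helpers above exercise: encode the source once, pre-allocate the key/value
# cache with `init_cache`, then decode one token per step while threading
# `past_key_values` and explicit `decoder_position_ids`. This is an illustrative
# addition, not part of the test suite; it assumes a
# FlaxPegasusForConditionalGeneration-style model (whose `decode` returns
# logits), and the `bos_token_id` default and `max_length` are assumptions.
def example_greedy_decode_with_cache(model, input_ids, attention_mask, bos_token_id=0, max_length=32):
    encoder_outputs = model.encode(input_ids, attention_mask=attention_mask)
    batch_size = input_ids.shape[0]
    past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)
    # the cache expects the attention mask at its full pre-allocated length
    decoder_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
    tokens = jnp.full((batch_size, 1), bos_token_id, dtype="i4")
    generated = [tokens]
    for step in range(max_length - 1):
        position_ids = jnp.full((batch_size, 1), step, dtype="i4")
        outputs = model.decode(
            tokens,
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=position_ids,
        )
        past_key_values = outputs.past_key_values  # carry the updated cache forward
        tokens = jnp.argmax(outputs.logits[:, -1:], axis=-1).astype("i4")  # greedy pick
        generated.append(tokens)
    return jnp.concatenate(generated, axis=-1)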
289
1
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger UpperCAmelCase__ = get_logger(__name__) UpperCAmelCase__ = Path(__file__).parent / """model_card_template.md""" UpperCAmelCase__ = uuida().hex UpperCAmelCase__ = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES UpperCAmelCase__ = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES UpperCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def __UpperCAmelCase ( lowercase = None ): """simple docstring""" _UpperCAmelCase = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""" ,"""""" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(lowercase ,lowercase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(lowercase ,lowercase ): ua += "; " + user_agent return ua def __UpperCAmelCase ( lowercase ,lowercase = None ,lowercase = None ): """simple docstring""" if token is None: _UpperCAmelCase = HfFolder.get_token() if organization is None: _UpperCAmelCase = whoami(lowercase )["""name"""] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""" ) if hasattr(lowercase ,"""local_rank""" ) and args.local_rank not in [-1, 0]: return _UpperCAmelCase = args.hub_token if hasattr(lowercase ,"""hub_token""" ) else None _UpperCAmelCase = get_full_repo_name(lowercase ,token=lowercase ) _UpperCAmelCase = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""" ,license="""apache-2.0""" ,library_name="""diffusers""" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=lowercase ,model_name=lowercase ,repo_name=lowercase ,dataset_name=args.dataset_name if hasattr(lowercase ,"""dataset_name""" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(lowercase 
,"""gradient_accumulation_steps""" ) else None ) ,adam_betaa=args.adam_betaa if hasattr(lowercase ,"""adam_beta1""" ) else None ,adam_betaa=args.adam_betaa if hasattr(lowercase ,"""adam_beta2""" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(lowercase ,"""adam_weight_decay""" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(lowercase ,"""adam_epsilon""" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(lowercase ,"""lr_scheduler""" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(lowercase ,"""lr_warmup_steps""" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(lowercase ,"""ema_inv_gamma""" ) else None ,ema_power=args.ema_power if hasattr(lowercase ,"""ema_power""" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(lowercase ,"""ema_max_decay""" ) else None ,mixed_precision=args.mixed_precision ,) _UpperCAmelCase = os.path.join(args.output_dir ,"""README.md""" ) model_card.save(lowercase ) def __UpperCAmelCase ( lowercase ,lowercase = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash _UpperCAmelCase = str(Path(lowercase ).as_posix() ) _UpperCAmelCase = re.search(R"""snapshots/([^/]+)/""" ,lowercase ) if search is None: return None _UpperCAmelCase = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(lowercase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. UpperCAmelCase__ = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) UpperCAmelCase__ = os.path.join(hf_cache_home, """diffusers""") def __UpperCAmelCase ( lowercase = None ,lowercase = None ): """simple docstring""" if new_cache_dir is None: _UpperCAmelCase = DIFFUSERS_CACHE if old_cache_dir is None: _UpperCAmelCase = old_diffusers_cache _UpperCAmelCase = Path(lowercase ).expanduser() _UpperCAmelCase = Path(lowercase ).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): _UpperCAmelCase = new_cache_dir / old_blob_path.relative_to(lowercase ) new_blob_path.parent.mkdir(parents=lowercase ,exist_ok=lowercase ) os.replace(lowercase ,lowercase ) try: os.symlink(lowercase ,lowercase ) except OSError: logger.warning( """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). UpperCAmelCase__ = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): UpperCAmelCase__ = 0 else: with open(cache_version_file) as f: try: UpperCAmelCase__ = int(f.read()) except ValueError: UpperCAmelCase__ = 0 if cache_version < 1: UpperCAmelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. 
This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: UpperCAmelCase__ = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' """the directory exists and can be written to.""" ) def __UpperCAmelCase ( lowercase ,lowercase = None ): """simple docstring""" if variant is not None: _UpperCAmelCase = weights_name.split(""".""" ) _UpperCAmelCase = splits[:-1] + [variant] + splits[-1:] _UpperCAmelCase = """.""".join(lowercase ) return weights_name def __UpperCAmelCase ( lowercase ,*, lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase=None ,): """simple docstring""" _UpperCAmelCase = str(lowercase ) if os.path.isfile(lowercase ): return pretrained_model_name_or_path elif os.path.isdir(lowercase ): if os.path.isfile(os.path.join(lowercase ,lowercase ) ): # Load from a PyTorch checkpoint _UpperCAmelCase = os.path.join(lowercase ,lowercase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(lowercase ,lowercase ,lowercase ) ): _UpperCAmelCase = os.path.join(lowercase ,lowercase ,lowercase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(lowercase ).base_version ) >= version.parse("""0.20.0""" ) ): try: _UpperCAmelCase = hf_hub_download( lowercase ,filename=_add_variant(lowercase ,lowercase ) ,cache_dir=lowercase ,force_download=lowercase ,proxies=lowercase ,resume_download=lowercase ,local_files_only=lowercase ,use_auth_token=lowercase ,user_agent=lowercase ,subfolder=lowercase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,lowercase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(lowercase ,lowercase )} file in the \'main\' branch of {pretrained_model_name_or_path}. 
\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(lowercase ,lowercase )}\' so that the correct variant file can be added.''' ,lowercase ,) try: # 2. Load model file as usual _UpperCAmelCase = hf_hub_download( lowercase ,filename=lowercase ,cache_dir=lowercase ,force_download=lowercase ,proxies=lowercase ,resume_download=lowercase ,local_files_only=lowercase ,use_auth_token=lowercase ,user_agent=lowercase ,subfolder=lowercase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""" ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' """this model name. Check the model page at """ f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
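# A quick illustration (an addition, not part of the module) of `_add_variant`,
# which `_get_model_file` uses to resolve variant weight files: the variant is
# spliced in front of the file extension. The filenames below are illustrative,
# and because of the relative imports above this guard is a sketch to read
# rather than a script to run directly.
if __name__ == "__main__":
    assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
    assert _add_variant("model.safetensors", "non_ema") == "model.non_ema.safetensors"
    assert _add_variant("model.safetensors") == "model.safetensors"  # no variant: name unchanged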
289
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(lowercase ) for i in range(start * start ,end + 1 ,lowercase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end ,lowercase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(lowercase ,high + 1 ,lowercase ): _UpperCAmelCase = False for j in range(len(lowercase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end ,lowercase ) return prime print(sieve(1_0**6))
289
1