code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Dict =logging.get_logger(__name__) _A : List[Any] ={ '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _lowercase ( _lowercase ): a = """vivit""" def __init__( self: List[str] , UpperCamelCase__: List[Any]=224 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: int=[2, 16, 16] , UpperCamelCase__: Optional[Any]=3 , UpperCamelCase__: Dict=768 , UpperCamelCase__: Optional[int]=12 , UpperCamelCase__: Tuple=12 , UpperCamelCase__: List[str]=3_072 , UpperCamelCase__: Optional[int]="gelu_fast" , UpperCamelCase__: Union[str, Any]=0.0 , UpperCamelCase__: Any=0.0 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: Optional[Any]=1e-06 , UpperCamelCase__: List[str]=True , **UpperCamelCase__: List[Any] , ): lowerCamelCase__ : List[Any] = hidden_size lowerCamelCase__ : Dict = num_hidden_layers lowerCamelCase__ : List[str] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Dict = hidden_dropout_prob lowerCamelCase__ : Tuple = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Optional[int] = layer_norm_eps lowerCamelCase__ : int = image_size lowerCamelCase__ : str = num_frames lowerCamelCase__ : Optional[Any] = tubelet_size lowerCamelCase__ : int = num_channels lowerCamelCase__ : Any = qkv_bias super().__init__(**UpperCamelCase__ )
41
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase : List[str] = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ): """simple docstring""" super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) lowerCamelCase = spectrogram_length lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = feature_size // self.patch_size[1] lowerCamelCase = n_fft lowerCamelCase = sampling_rate // hop_length_to_sampling_rate lowerCamelCase = sampling_rate lowerCamelCase = padding_value lowerCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) lowerCamelCase = log_spec[:, :-1] lowerCamelCase = log_spec - 20.0 lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) lowerCamelCase = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCamelCase = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase = np.array(_a ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase = padded_audio_features * self.padding_value for i in range(len(_a ) ): lowerCamelCase = audio_features[i] lowerCamelCase = feature # return as BatchFeature if return_attention_mask: lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase = {"""audio_values""": padded_audio_features} lowerCamelCase = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
291
0
'''simple docstring''' from __future__ import annotations class __UpperCAmelCase : def __init__( self , lowerCAmelCase_ ): """simple docstring""" _snake_case = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(lowerCAmelCase_ ) != 0: _snake_case = len(rows[0] ) if cols == 0: raise error for row in rows: if len(lowerCAmelCase_ ) != cols: raise error for value in row: if not isinstance(lowerCAmelCase_ , (int, float) ): raise error _snake_case = rows else: _snake_case = [] def lowerCamelCase ( self ): """simple docstring""" return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def lowerCamelCase ( self ): """simple docstring""" return len(self.rows ) @property def lowerCamelCase ( self ): """simple docstring""" return len(self.rows[0] ) @property def lowerCamelCase ( self ): """simple docstring""" return (self.num_rows, self.num_columns) @property def lowerCamelCase ( self ): """simple docstring""" return self.order[0] == self.order[1] def lowerCamelCase ( self ): """simple docstring""" _snake_case = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def lowerCamelCase ( self ): """simple docstring""" return bool(self.determinant() ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(lowerCAmelCase_ ).determinant() def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if (row + column) % 2 == 0: return self.get_minor(lowerCAmelCase_ , lowerCAmelCase_ ) return -1 * self.get_minor(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" return Matrix( [ [self.get_minor(lowerCAmelCase_ , lowerCAmelCase_ ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def lowerCamelCase ( self ): """simple docstring""" return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self ): """simple docstring""" return str(self.rows ) def __str__( self ): """simple docstring""" if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. '.join([str(lowerCAmelCase_ ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ): """simple docstring""" _snake_case = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise type_error for value in row: if not isinstance(lowerCAmelCase_ , (int, float) ): raise type_error if len(lowerCAmelCase_ ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(lowerCAmelCase_ ) else: _snake_case = self.rows[0:position] + [row] + self.rows[position:] def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ): """simple docstring""" _snake_case = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise type_error for value in column: if not isinstance(lowerCAmelCase_ , (int, float) ): raise type_error if len(lowerCAmelCase_ ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: _snake_case = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: _snake_case = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , lowerCAmelCase_ ): """simple docstring""" if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return NotImplemented return self.rows == other.rows def __ne__( self , lowerCAmelCase_ ): """simple docstring""" return not self == other def __neg__( self ): """simple docstring""" return self * -1 def __add__( self , lowerCAmelCase_ ): """simple docstring""" if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , lowerCAmelCase_ ): """simple docstring""" if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , lowerCAmelCase_ ): """simple docstring""" if isinstance(lowerCAmelCase_ , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(lowerCAmelCase_ , lowerCAmelCase_ ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self , lowerCAmelCase_ ): """simple docstring""" if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) _snake_case = self for _ in range(other - 1 ): result *= self return result @classmethod def lowerCamelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return sum(row[i] * column[i] for i in range(len(lowerCAmelCase_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
42
"""simple docstring""" from math import ceil def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = list(range(0 , snake_case__ ) ) lowerCamelCase = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowerCamelCase = [] for i in device_map_blocks: if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(snake_case__ ) # Missing blocks lowerCamelCase = [i for i in blocks if i not in device_map_blocks] lowerCamelCase = [i for i in device_map_blocks if i not in blocks] if len(snake_case__ ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(snake_case__ ) ) def a__ ( snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = list(range(snake_case__ ) ) lowerCamelCase = int(ceil(n_layers / len(snake_case__ ) ) ) lowerCamelCase = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )] return dict(zip(snake_case__ , snake_case__ ) )
291
0
__lowercase = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)] def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Tuple = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 100_000] number //= 100_000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution __lowercase = [None] * 1000_0000 __lowercase = True __lowercase = False def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore __UpperCamelCase :Optional[Any] = chain(next_number(SCREAMING_SNAKE_CASE ) ) __UpperCamelCase :Union[str, Any] = number_chain while number < 10_000_000: __UpperCamelCase :List[Any] = number_chain number *= 10 return number_chain def lowerCamelCase ( SCREAMING_SNAKE_CASE = 10_000_000 ): '''simple docstring''' for i in range(1 , SCREAMING_SNAKE_CASE ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution() = }')
43
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = seq_length lowerCamelCase = is_training lowerCamelCase = use_attention_mask lowerCamelCase = use_token_type_ids lowerCamelCase = use_labels lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = type_vocab_size lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = num_choices def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase = None if self.use_attention_mask: lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase = None if self.use_token_type_ids: lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerModelTester(self ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a ) lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a ) @require_flax class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase = model(_a )[0] lowerCamelCase = 50_000 lowerCamelCase = (1, 6, vocab_size) self.assertEqual(output.shape , _a ) lowerCamelCase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
291
0
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __A ( SCREAMING_SNAKE_CASE_ ): def __init__( self , a__ , a__ , a__ , a__ = None , ): super().__init__() self.register_modules(transformer=a__ , vae=a__ , scheduler=a__ ) # create a imagenet -> id dictionary for easier use _lowerCAmelCase : Optional[int] = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): _lowerCAmelCase : Any = int(a__ ) _lowerCAmelCase : List[Any] = dict(sorted(self.labels.items() ) ) def __A ( self , a__ ): if not isinstance(a__ , a__ ): _lowerCAmelCase : Union[str, Any] = list(a__ ) for l in label: if l not in self.labels: raise ValueError( F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , a__ , a__ = 4.0 , a__ = None , a__ = 50 , a__ = "pil" , a__ = True , ): _lowerCAmelCase : Optional[int] = len(a__ ) _lowerCAmelCase : Union[str, Any] = self.transformer.config.sample_size _lowerCAmelCase : str = self.transformer.config.in_channels _lowerCAmelCase : Any = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=a__ , device=self.device , dtype=self.transformer.dtype , ) _lowerCAmelCase : Optional[int] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents _lowerCAmelCase : Any = torch.tensor(a__ , device=self.device ).reshape(-1 ) _lowerCAmelCase : List[str] = torch.tensor([1000] * batch_size , device=self.device ) _lowerCAmelCase : int = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(a__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: _lowerCAmelCase : Dict = latent_model_input[: len(a__ ) // 2] _lowerCAmelCase : Any = torch.cat([half, half] , dim=0 ) _lowerCAmelCase : Dict = self.scheduler.scale_model_input(a__ , a__ ) _lowerCAmelCase : List[str] = t if not torch.is_tensor(a__ ): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) _lowerCAmelCase : List[Any] = latent_model_input.device.type == """mps""" if isinstance(a__ , a__ ): _lowerCAmelCase : List[Any] = torch.floataa if is_mps else torch.floataa else: _lowerCAmelCase : Tuple = torch.intaa if is_mps else torch.intaa _lowerCAmelCase : Union[str, Any] = torch.tensor([timesteps] , dtype=a__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: _lowerCAmelCase : Any = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML _lowerCAmelCase : Union[str, Any] = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output _lowerCAmelCase : str = self.transformer( a__ , timestep=a__ , class_labels=a__ ).sample # perform guidance if guidance_scale > 1: _lowerCAmelCase , _lowerCAmelCase : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] _lowerCAmelCase , _lowerCAmelCase : List[str] = torch.split(a__ , len(a__ ) // 2 , dim=0 ) _lowerCAmelCase : Union[str, Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps) _lowerCAmelCase : Any = torch.cat([half_eps, half_eps] , dim=0 ) _lowerCAmelCase : List[Any] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: _lowerCAmelCase , _lowerCAmelCase : str = torch.split(a__ , a__ , dim=1 ) else: _lowerCAmelCase : Any = noise_pred # compute previous image: x_t -> x_t-1 _lowerCAmelCase : str = self.scheduler.step(a__ , a__ , a__ ).prev_sample if guidance_scale > 1: _lowerCAmelCase , _lowerCAmelCase : Any = latent_model_input.chunk(2 , dim=0 ) else: _lowerCAmelCase : List[Any] = latent_model_input _lowerCAmelCase : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents _lowerCAmelCase : Optional[Any] = self.vae.decode(a__ ).sample _lowerCAmelCase : Optional[int] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _lowerCAmelCase : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(a__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=a__ )
44
"""simple docstring""" from typing import Any def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list: _validation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Creates data structures and fill initial step lowerCamelCase = {} lowerCamelCase = {} for state in states_space: lowerCamelCase = observations_space[0] lowerCamelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case__ ) ): lowerCamelCase = observations_space[o] lowerCamelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state # Update probabilities and pointers dicts lowerCamelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase = arg_max # The final observation lowerCamelCase = observations_space[len(snake_case__ ) - 1] # argmax for given final observation lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state lowerCamelCase = arg_max # Process pointers backwards lowerCamelCase = last_state lowerCamelCase = [] for o in range(len(snake_case__ ) - 1 , -1 , -1 ): result.append(snake_case__ ) lowerCamelCase = pointers[previous, observations_space[o]] result.reverse() return result def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_not_empty( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) _validate_lists(snake_case__ , snake_case__ ) _validate_dicts( snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_list(snake_case__ , """observations_space""" ) _validate_list(snake_case__ , """states_space""" ) def a__ ( snake_case__ , snake_case__ ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a list' raise ValueError(snake_case__ ) else: for x in _object: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'{var_name} must be a list of strings' raise ValueError(snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ ) _validate_nested_dict(snake_case__ , """transition_probabilities""" ) _validate_nested_dict(snake_case__ , """emission_probabilities""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_dict(_object , snake_case__ , snake_case__ ) for x in _object.values(): _validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a dict' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ): lowerCamelCase = F'{var_name} all keys must be strings' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ): lowerCamelCase = """nested dictionary """ if nested else """""" lowerCamelCase = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowerCAmelCase : '''simple docstring''' def __UpperCAmelCase ( self ): torch.manual_seed(0 ) __a = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __a = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=_a , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) __a = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCAmelCase ( self ): torch.manual_seed(0 ) __a = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __a = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=_a , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) __a = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) __a = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCAmelCase ( self ): __a = self.get_dummy_components() __a = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) __a = self.get_dummy_inputs(_a ) __a = inputs['''prompt'''] __a = inputs['''generator'''] __a = inputs['''num_inference_steps'''] __a = inputs['''output_type'''] if "image" in inputs: __a = inputs['''image'''] else: __a = None if "mask_image" in inputs: __a = inputs['''mask_image'''] else: __a = None if "original_image" in inputs: __a = inputs['''original_image'''] else: __a = None __a , __a = pipe.encode_prompt(_a ) # inputs with prompt converted to embeddings __a = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: __a = image if mask_image is not None: __a = mask_image if original_image is not None: __a = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_a , _a , _a ) __a = pipe(**_a )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_a ) __a = self.pipeline_class.from_pretrained(_a ) pipe_loaded.to(_a ) pipe_loaded.set_progress_bar_config(disable=_a ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_a , _a ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) __a = self.get_dummy_inputs(_a ) __a = inputs['''generator'''] __a = inputs['''num_inference_steps'''] __a = inputs['''output_type'''] # inputs with prompt converted to embeddings __a = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: __a = image if mask_image is not None: __a = mask_image if original_image is not None: __a = original_image __a = pipe_loaded(**_a )[0] __a = np.abs(to_np(_a ) - to_np(_a ) ).max() self.assertLess(_a , 1E-4 ) def __UpperCAmelCase ( self ): __a = self.get_dummy_components() __a = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) __a = self.get_dummy_inputs(_a ) __a = pipe(**_a )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_a ) __a = self.pipeline_class.from_pretrained(_a ) pipe_loaded.to(_a ) pipe_loaded.set_progress_bar_config(disable=_a ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests __a = self.get_dummy_inputs(_a ) __a = pipe_loaded(**_a )[0] __a = np.abs(to_np(_a ) - to_np(_a ) ).max() self.assertLess(_a , 1E-4 )
45
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger(__name__) def a__ ( snake_case__ ) -> Dict: lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" ) if "model" in sd.keys(): lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # pop unnecessary weights lowerCamelCase = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(snake_case__ ) lowerCamelCase = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowerCamelCase = sd.pop(snake_case__ ) lowerCamelCase = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowerCamelCase = sd[key] # We split QKV in separate Q,K,V lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" ) lowerCamelCase = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowerCamelCase , lowerCamelCase , lowerCamelCase = torch.split(snake_case__ , depth // 3 , dim=0 ) lowerCamelCase = q lowerCamelCase = k lowerCamelCase = v del sd[key] return sd @torch.no_grad() def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple: lowerCamelCase = load_checkpoint(snake_case__ ) if config is not None: lowerCamelCase = OPTConfig.from_pretrained(snake_case__ ) else: lowerCamelCase = OPTConfig() lowerCamelCase = OPTModel(snake_case__ ).half().eval() model.load_state_dict(snake_case__ ) # Check results Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") lowerCAmelCase : Optional[Any] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
291
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = { "configuration_x_clip": [ "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "XCLIPConfig", "XCLIPTextConfig", "XCLIPVisionConfig", ], "processing_x_clip": ["XCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", "XCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
46
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = tempfile.mkdtemp() # fmt: off lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCamelCase = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } lowerCamelCase = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_a , _a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_tokenizer() lowerCamelCase = self.get_image_processor() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCamelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = image_processor(_a , return_tensors="""np""" ) lowerCamelCase = processor(images=_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = processor(text=_a ) lowerCamelCase = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase = processor.batch_decode(_a ) lowerCamelCase = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
291
0
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A__ ( A__ ): A__ = ['image_processor', 'tokenizer'] A__ = 'LayoutLMv2ImageProcessor' A__ = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self : Tuple , _a : List[Any]=None , _a : Any=None , **_a : int ) -> str: '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _a , ) _SCREAMING_SNAKE_CASE =kwargs.pop('feature_extractor' ) _SCREAMING_SNAKE_CASE =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_a , _a ) def __call__( self : int , _a : List[str] , _a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _a : Union[List[List[int]], List[List[List[int]]]] = None , _a : Optional[Union[List[int], List[List[int]]]] = None , _a : bool = True , _a : Union[bool, str, PaddingStrategy] = False , _a : Union[bool, str, TruncationStrategy] = None , _a : Optional[int] = None , _a : int = 0 , _a : Optional[int] = None , _a : Optional[bool] = None , _a : Optional[bool] = None , _a : bool = False , _a : bool = False , _a : bool = False , _a : bool = False , _a : bool = True , _a : Optional[Union[str, TensorType]] = None , **_a : List[str] , ) -> BatchEncoding: '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' ) # first, apply the image processor _SCREAMING_SNAKE_CASE =self.image_processor(images=_a , return_tensors=_a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_a , _a ): _SCREAMING_SNAKE_CASE =[text] # add batch dimension (as the image processor always adds a batch dimension) _SCREAMING_SNAKE_CASE =features['words'] _SCREAMING_SNAKE_CASE =self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel values _SCREAMING_SNAKE_CASE =features.pop('pixel_values' ) if return_overflowing_tokens is True: _SCREAMING_SNAKE_CASE =self.get_overflowing_images(_a , encoded_inputs['overflow_to_sample_mapping'] ) _SCREAMING_SNAKE_CASE =images return encoded_inputs def A ( self : Any , _a : int , _a : Optional[Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_a ) != len(_a ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f" {len(_a )} and {len(_a )}" ) return images_with_overflow def A ( self : Dict , *_a : Any , **_a : Tuple ) -> int: '''simple docstring''' return self.tokenizer.batch_decode(*_a , **_a ) def A ( self : List[Any] , *_a : Any , **_a : Optional[int] ) -> List[Any]: '''simple docstring''' return self.tokenizer.decode(*_a , **_a ) @property def A ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def A ( self : int ) -> List[Any]: '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _a , ) return self.image_processor_class @property def A ( self : List[Any] ) -> Dict: '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _a , ) return self.image_processor
47
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def a__ ( ) -> Union[str, Any]: lowerCamelCase = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def a__ ( ) -> List[str]: lowerCamelCase = parse_args() # Import training_script as a module. lowerCamelCase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCamelCase = script_fpath.stem lowerCamelCase = importlib.import_module(snake_case__ ) # Patch sys.argv lowerCamelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
291
0
from __future__ import annotations from math import pi, sqrt def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple: if inductance <= 0: raise ValueError("Inductance cannot be 0 or negative" ) elif capacitance <= 0: raise ValueError("Capacitance cannot be 0 or negative" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
48
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : List[str] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "sew-d" def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a=2 , _a=512 , _a=256 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ): """simple docstring""" super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowerCamelCase = hidden_size lowerCamelCase = feat_extract_norm lowerCamelCase = feat_extract_activation lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = conv_bias lowerCamelCase = num_conv_pos_embeddings lowerCamelCase = num_conv_pos_embedding_groups lowerCamelCase = len(self.conv_dim ) lowerCamelCase = num_hidden_layers lowerCamelCase = intermediate_size lowerCamelCase = squeeze_factor lowerCamelCase = max_position_embeddings lowerCamelCase = position_buckets lowerCamelCase = share_att_key lowerCamelCase = relative_attention lowerCamelCase = norm_rel_ebd lowerCamelCase = list(_a ) lowerCamelCase = hidden_act lowerCamelCase = num_attention_heads lowerCamelCase = hidden_dropout lowerCamelCase = attention_dropout lowerCamelCase = activation_dropout lowerCamelCase = feat_proj_dropout lowerCamelCase = final_dropout lowerCamelCase = layer_norm_eps lowerCamelCase = feature_layer_norm_eps lowerCamelCase = initializer_range lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase = apply_spec_augment lowerCamelCase = mask_time_prob lowerCamelCase = mask_time_length lowerCamelCase = mask_time_min_masks lowerCamelCase = mask_feature_prob lowerCamelCase = mask_feature_length lowerCamelCase = mask_feature_min_masks # ctc loss lowerCamelCase = ctc_loss_reduction lowerCamelCase = ctc_zero_infinity # sequence classification lowerCamelCase = use_weighted_layer_sum lowerCamelCase = classifier_proj_size @property def _lowerCAmelCase ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
291
0
import os def __snake_case ( ): __a = os.path.dirname(os.path.realpath(_UpperCAmelCase ) ) __a = os.path.join(_UpperCAmelCase , '''triangle.txt''' ) with open(_UpperCAmelCase ) as f: __a = f.readlines() __a = [] for line in triangle: __a = [] for number in line.strip().split(''' ''' ): numbers_from_line.append(int(_UpperCAmelCase ) ) a.append(_UpperCAmelCase ) for i in range(1 , len(_UpperCAmelCase ) ): for j in range(len(a[i] ) ): __a = a[i - 1][j] if j != len(a[i - 1] ) else 0 __a = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(_UpperCAmelCase , _UpperCAmelCase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
49
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase : Any = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _lowerCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None , _a="warn" , ): """simple docstring""" lowerCamelCase = recall_score( _a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , ) return {"recall": float(_a ) if score.size == 1 else score}
291
0
from __future__ import annotations import math def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True _UpperCAmelCase : Any = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> list[int]: if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError('n must be an integer' ) if n <= 0: raise ValueError('n must be >= 0' ) lowerCamelCase__ : int = [] for num in range(len(_UpperCAmelCase ) ): lowerCamelCase__ : Union[str, Any] = 0 while 2 * i * i <= odd_composites[num]: lowerCamelCase__ : Dict = odd_composites[num] - 2 * i * i if is_prime(_UpperCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(_UpperCAmelCase ) == n: return list_nums return [] def SCREAMING_SNAKE_CASE ( ) -> int: return compute_nums(1 )[0] if __name__ == "__main__": print(F"""{solution() = }""")
50
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = process lowerCamelCase = params def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" lowerCamelCase = self.dataset[i] lowerCamelCase = self.process(_a , **self.params ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" lowerCamelCase = loader lowerCamelCase = infer lowerCamelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase = None lowerCamelCase = loader_batch_size # Internal bookkeeping lowerCamelCase = None lowerCamelCase = None def __len__( self ): """simple docstring""" return len(self.loader ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_a , _a ): # Convert ModelOutput to tuple first lowerCamelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_a , _a ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowerCamelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase = self._loader_batch_data.__class__(_a ) self._loader_batch_index += 1 return result def _lowerCAmelCase ( self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase = next(self.iterator ) lowerCamelCase = self.infer(_a , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase = processed lowerCamelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" super().__init__(_a , _a , _a ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) lowerCamelCase = None return self def _lowerCAmelCase ( self ): """simple docstring""" if self.subiterator is None: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCamelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) lowerCamelCase = next(self.subiterator ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase = False lowerCamelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator while not is_last: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size lowerCamelCase = processed lowerCamelCase = 0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator else: lowerCamelCase = processed lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) return accumulator class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = key def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return self.dataset[i][self.key] class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = keya lowerCamelCase = keya def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
291
0
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def A (__A : Tuple , __A : str , __A : Dict ) -> int: """simple docstring""" if isinstance(__A , torch.Tensor ): return image elif isinstance(__A , PIL.Image.Image ): UpperCAmelCase_ = [image] if isinstance(image[0] , PIL.Image.Image ): UpperCAmelCase_ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] UpperCAmelCase_ = np.concatenate(__A , axis=0 ) UpperCAmelCase_ = np.array(__A ).astype(np.floataa ) / 255.0 UpperCAmelCase_ = image.transpose(0 , 3 , 1 , 2 ) UpperCAmelCase_ = 2.0 * image - 1.0 UpperCAmelCase_ = torch.from_numpy(__A ) elif isinstance(image[0] , torch.Tensor ): UpperCAmelCase_ = torch.cat(__A , dim=0 ) return image def A (__A : Dict , __A : List[str] , __A : Dict , __A : Any=0.9_995 ) -> Optional[Any]: """simple docstring""" if not isinstance(__A , np.ndarray ): UpperCAmelCase_ = True UpperCAmelCase_ = va.device UpperCAmelCase_ = va.cpu().numpy() UpperCAmelCase_ = va.cpu().numpy() UpperCAmelCase_ = np.sum(va * va / (np.linalg.norm(__A ) * np.linalg.norm(__A )) ) if np.abs(__A ) > DOT_THRESHOLD: UpperCAmelCase_ = (1 - t) * va + t * va else: UpperCAmelCase_ = np.arccos(__A ) UpperCAmelCase_ = np.sin(__A ) UpperCAmelCase_ = theta_a * t UpperCAmelCase_ = np.sin(__A ) UpperCAmelCase_ = np.sin(theta_a - theta_t ) / sin_theta_a UpperCAmelCase_ = sin_theta_t / sin_theta_a UpperCAmelCase_ = sa * va + sa * va if inputs_are_torch: UpperCAmelCase_ = torch.from_numpy(__A ).to(__A ) return va def A (__A : Dict , __A : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ = F.normalize(__A , dim=-1 ) UpperCAmelCase_ = F.normalize(__A , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def A (__A : Optional[int] , __A : int ) -> Optional[Any]: """simple docstring""" for param in model.parameters(): UpperCAmelCase_ = value class __snake_case ( a ): def __init__( self : Union[str, Any] , _snake_case : AutoencoderKL , _snake_case : CLIPTextModel , _snake_case : CLIPModel , _snake_case : CLIPTokenizer , _snake_case : UNetaDConditionModel , _snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _snake_case : CLIPFeatureExtractor , _snake_case : Any=None , _snake_case : str=None , _snake_case : Optional[int]=None , ): """simple docstring""" super().__init__() self.register_modules( vae=_snake_case , text_encoder=_snake_case , clip_model=_snake_case , tokenizer=_snake_case , unet=_snake_case , scheduler=_snake_case , feature_extractor=_snake_case , coca_model=_snake_case , coca_tokenizer=_snake_case , coca_transform=_snake_case , ) UpperCAmelCase_ = ( feature_extractor.size if isinstance(feature_extractor.size , _snake_case) else feature_extractor.size['''shortest_edge'''] ) UpperCAmelCase_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std) set_requires_grad(self.text_encoder , _snake_case) set_requires_grad(self.clip_model , _snake_case) def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[Union[str, int]] = "auto"): """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_snake_case) def lowerCamelCase ( self : int): """simple docstring""" self.enable_attention_slicing(_snake_case) def lowerCamelCase ( self : int): """simple docstring""" set_requires_grad(self.vae , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" set_requires_grad(self.vae , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" set_requires_grad(self.unet , _snake_case) def lowerCamelCase ( self : int): """simple docstring""" set_requires_grad(self.unet , _snake_case) def lowerCamelCase ( self : Dict , _snake_case : List[str] , _snake_case : List[str] , _snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = min(int(num_inference_steps * strength) , _snake_case) UpperCAmelCase_ = max(num_inference_steps - init_timestep , 0) UpperCAmelCase_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowerCamelCase ( self : int , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : str , _snake_case : Tuple , _snake_case : Tuple=None): """simple docstring""" if not isinstance(_snake_case , torch.Tensor): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(_snake_case)}""") UpperCAmelCase_ = image.to(device=_snake_case , dtype=_snake_case) if isinstance(_snake_case , _snake_case): UpperCAmelCase_ = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(_snake_case) ] UpperCAmelCase_ = torch.cat(_snake_case , dim=0) else: UpperCAmelCase_ = self.vae.encode(_snake_case).latent_dist.sample(_snake_case) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCAmelCase_ = 0.1_8_2_1_5 * init_latents UpperCAmelCase_ = init_latents.repeat_interleave(_snake_case , dim=0) UpperCAmelCase_ = randn_tensor(init_latents.shape , generator=_snake_case , device=_snake_case , dtype=_snake_case) # get latents UpperCAmelCase_ = self.scheduler.add_noise(_snake_case , _snake_case , _snake_case) UpperCAmelCase_ = init_latents return latents def lowerCamelCase ( self : Any , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.coca_transform(_snake_case).unsqueeze(0) with torch.no_grad(), torch.cuda.amp.autocast(): UpperCAmelCase_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype)) UpperCAmelCase_ = self.coca_tokenizer.decode(generated[0].cpu().numpy()) return generated.split('''<end_of_text>''')[0].replace('''<start_of_text>''' , '''''').rstrip(''' .,''') def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : int): """simple docstring""" UpperCAmelCase_ = self.feature_extractor.preprocess(_snake_case) UpperCAmelCase_ = torch.from_numpy(clip_image_input['''pixel_values'''][0]).unsqueeze(0).to(self.device).half() UpperCAmelCase_ = self.clip_model.get_image_features(_snake_case) UpperCAmelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_snake_case) UpperCAmelCase_ = image_embeddings_clip.repeat_interleave(_snake_case , dim=0) return image_embeddings_clip @torch.enable_grad() def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Any , _snake_case : List[Any] , _snake_case : List[str] , ): """simple docstring""" UpperCAmelCase_ = latents.detach().requires_grad_() UpperCAmelCase_ = self.scheduler.scale_model_input(_snake_case , _snake_case) # predict the noise residual UpperCAmelCase_ = self.unet(_snake_case , _snake_case , encoder_hidden_states=_snake_case).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): UpperCAmelCase_ = self.scheduler.alphas_cumprod[timestep] UpperCAmelCase_ = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCAmelCase_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 UpperCAmelCase_ = torch.sqrt(_snake_case) UpperCAmelCase_ = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , _snake_case): UpperCAmelCase_ = self.scheduler.sigmas[index] UpperCAmelCase_ = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler)} not supported""") # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCAmelCase_ = 1 / 0.1_8_2_1_5 * sample UpperCAmelCase_ = self.vae.decode(_snake_case).sample UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1) UpperCAmelCase_ = transforms.Resize(self.feature_extractor_size)(_snake_case) UpperCAmelCase_ = self.normalize(_snake_case).to(latents.dtype) UpperCAmelCase_ = self.clip_model.get_image_features(_snake_case) UpperCAmelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_snake_case) UpperCAmelCase_ = spherical_dist_loss(_snake_case , _snake_case).mean() * clip_guidance_scale UpperCAmelCase_ = -torch.autograd.grad(_snake_case , _snake_case)[0] if isinstance(self.scheduler , _snake_case): UpperCAmelCase_ = latents.detach() + grads * (sigma**2) UpperCAmelCase_ = noise_pred_original else: UpperCAmelCase_ = noise_pred_original - torch.sqrt(_snake_case) * grads return noise_pred, latents @torch.no_grad() def __call__( self : List[Any] , _snake_case : Union[torch.FloatTensor, PIL.Image.Image] , _snake_case : Union[torch.FloatTensor, PIL.Image.Image] , _snake_case : Optional[str] = None , _snake_case : Optional[str] = None , _snake_case : Optional[int] = 512 , _snake_case : Optional[int] = 512 , _snake_case : float = 0.6 , _snake_case : Optional[int] = 50 , _snake_case : Optional[float] = 7.5 , _snake_case : Optional[int] = 1 , _snake_case : float = 0.0 , _snake_case : Optional[float] = 100 , _snake_case : Optional[torch.Generator] = None , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , _snake_case : float = 0.8 , _snake_case : float = 0.1 , _snake_case : float = 0.1 , ): """simple docstring""" if isinstance(_snake_case , _snake_case) and len(_snake_case) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(_snake_case)} generators.""") if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""") if isinstance(_snake_case , torch.Generator) and batch_size > 1: UpperCAmelCase_ = [generator] + [None] * (batch_size - 1) UpperCAmelCase_ = [ ('''model''', self.coca_model is None), ('''tokenizer''', self.coca_tokenizer is None), ('''transform''', self.coca_transform is None), ] UpperCAmelCase_ = [x[0] for x in coca_is_none if x[1]] UpperCAmelCase_ = ''', '''.join(_snake_case) # generate prompts with coca model if prompt is None if content_prompt is None: if len(_snake_case): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""") UpperCAmelCase_ = self.get_image_description(_snake_case) if style_prompt is None: if len(_snake_case): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""") UpperCAmelCase_ = self.get_image_description(_snake_case) # get prompt text embeddings for content and style UpperCAmelCase_ = self.tokenizer( _snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_snake_case , return_tensors='''pt''' , ) UpperCAmelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device))[0] UpperCAmelCase_ = self.tokenizer( _snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_snake_case , return_tensors='''pt''' , ) UpperCAmelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device))[0] UpperCAmelCase_ = slerp(_snake_case , _snake_case , _snake_case) # duplicate text embeddings for each generation per prompt UpperCAmelCase_ = text_embeddings.repeat_interleave(_snake_case , dim=0) # set timesteps UpperCAmelCase_ = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) UpperCAmelCase_ = {} if accepts_offset: UpperCAmelCase_ = 1 self.scheduler.set_timesteps(_snake_case , **_snake_case) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device) UpperCAmelCase_ , UpperCAmelCase_ = self.get_timesteps(_snake_case , _snake_case , self.device) UpperCAmelCase_ = timesteps[:1].repeat(_snake_case) # Preprocess image UpperCAmelCase_ = preprocess(_snake_case , _snake_case , _snake_case) UpperCAmelCase_ = self.prepare_latents( _snake_case , _snake_case , _snake_case , text_embeddings.dtype , self.device , _snake_case) UpperCAmelCase_ = preprocess(_snake_case , _snake_case , _snake_case) UpperCAmelCase_ = self.prepare_latents( _snake_case , _snake_case , _snake_case , text_embeddings.dtype , self.device , _snake_case) UpperCAmelCase_ = slerp(_snake_case , _snake_case , _snake_case) if clip_guidance_scale > 0: UpperCAmelCase_ = self.get_clip_image_embeddings(_snake_case , _snake_case) UpperCAmelCase_ = self.get_clip_image_embeddings(_snake_case , _snake_case) UpperCAmelCase_ = slerp( _snake_case , _snake_case , _snake_case) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCAmelCase_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase_ = content_text_input.input_ids.shape[-1] UpperCAmelCase_ = self.tokenizer([''''''] , padding='''max_length''' , max_length=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt UpperCAmelCase_ = uncond_embeddings.repeat_interleave(_snake_case , dim=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. UpperCAmelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps UpperCAmelCase_ = torch.randn(_snake_case , generator=_snake_case , device='''cpu''' , dtype=_snake_case).to( self.device) else: UpperCAmelCase_ = torch.randn(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""") UpperCAmelCase_ = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) UpperCAmelCase_ = {} if accepts_eta: UpperCAmelCase_ = eta # check if the scheduler accepts generator UpperCAmelCase_ = '''generator''' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: UpperCAmelCase_ = generator with self.progress_bar(total=_snake_case): for i, t in enumerate(_snake_case): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents UpperCAmelCase_ = self.scheduler.scale_model_input(_snake_case , _snake_case) # predict the noise residual UpperCAmelCase_ = self.unet(_snake_case , _snake_case , encoder_hidden_states=_snake_case).sample # perform classifier free guidance if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ = noise_pred.chunk(2) UpperCAmelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: UpperCAmelCase_ = ( text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings ) UpperCAmelCase_ , UpperCAmelCase_ = self.cond_fn( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCAmelCase_ = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase_ = self.vae.decode(_snake_case).sample UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1) UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": UpperCAmelCase_ = self.numpy_to_pil(_snake_case) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=_snake_case , nsfw_content_detected=_snake_case)
51
"""simple docstring""" def a__ ( snake_case__ ) -> bool: lowerCamelCase = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def a__ ( snake_case__ = 50_00 ) -> int: lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case__ )] for i, pentagonal_i in enumerate(snake_case__ ): for j in range(snake_case__ , len(snake_case__ ) ): lowerCamelCase = pentagonal_nums[j] lowerCamelCase = pentagonal_i + pentagonal_j lowerCamelCase = pentagonal_j - pentagonal_i if is_pentagonal(snake_case__ ) and is_pentagonal(snake_case__ ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
291
0
import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def A_ ( _lowerCAmelCase ) -> Optional[int]: if "cls_token" in name: UpperCamelCase : Union[str, Any] = name.replace("cls_token" , "vit.embeddings.cls_token" ) if "mask_token" in name: UpperCamelCase : str = name.replace("mask_token" , "decoder.mask_token" ) if "decoder_pos_embed" in name: UpperCamelCase : List[Any] = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" ) if "pos_embed" in name and "decoder" not in name: UpperCamelCase : List[Any] = name.replace("pos_embed" , "vit.embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCamelCase : List[Any] = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: UpperCamelCase : str = name.replace("patch_embed.norm" , "vit.embeddings.norm" ) if "decoder_blocks" in name: UpperCamelCase : Any = name.replace("decoder_blocks" , "decoder.decoder_layers" ) if "blocks" in name: UpperCamelCase : Any = name.replace("blocks" , "vit.encoder.layer" ) if "attn.proj" in name: UpperCamelCase : Any = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCamelCase : Union[str, Any] = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCamelCase : List[str] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCamelCase : List[Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCamelCase : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCamelCase : List[Any] = name.replace("mlp.fc2" , "output.dense" ) if "decoder_embed" in name: UpperCamelCase : Any = name.replace("decoder_embed" , "decoder.decoder_embed" ) if "decoder_norm" in name: UpperCamelCase : Union[str, Any] = name.replace("decoder_norm" , "decoder.decoder_norm" ) if "decoder_pred" in name: UpperCamelCase : List[Any] = name.replace("decoder_pred" , "decoder.decoder_pred" ) if "norm.weight" in name and "decoder" not in name: UpperCamelCase : Dict = name.replace("norm.weight" , "vit.layernorm.weight" ) if "norm.bias" in name and "decoder" not in name: UpperCamelCase : List[Any] = name.replace("norm.bias" , "vit.layernorm.bias" ) return name def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: for key in orig_state_dict.copy().keys(): UpperCamelCase : int = orig_state_dict.pop(_lowerCAmelCase ) if "qkv" in key: UpperCamelCase : List[str] = key.split("." ) UpperCamelCase : List[Any] = int(key_split[1] ) if "decoder_blocks" in key: UpperCamelCase : Optional[Any] = config.decoder_hidden_size UpperCamelCase : Union[str, Any] = "decoder.decoder_layers." if "weight" in key: UpperCamelCase : List[Any] = val[:dim, :] UpperCamelCase : Tuple = val[dim : dim * 2, :] UpperCamelCase : List[str] = val[-dim:, :] elif "bias" in key: UpperCamelCase : Tuple = val[:dim] UpperCamelCase : Union[str, Any] = val[dim : dim * 2] UpperCamelCase : str = val[-dim:] else: UpperCamelCase : Union[str, Any] = config.hidden_size UpperCamelCase : Union[str, Any] = "vit.encoder.layer." if "weight" in key: UpperCamelCase : Dict = val[:dim, :] UpperCamelCase : List[Any] = val[dim : dim * 2, :] UpperCamelCase : Any = val[-dim:, :] elif "bias" in key: UpperCamelCase : List[str] = val[:dim] UpperCamelCase : Optional[Any] = val[dim : dim * 2] UpperCamelCase : Optional[Any] = val[-dim:] else: UpperCamelCase : List[str] = val return orig_state_dict def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: UpperCamelCase : Union[str, Any] = ViTMAEConfig() if "large" in checkpoint_url: UpperCamelCase : Optional[Any] = 1024 UpperCamelCase : Union[str, Any] = 4096 UpperCamelCase : List[Any] = 24 UpperCamelCase : Optional[Any] = 16 elif "huge" in checkpoint_url: UpperCamelCase : List[str] = 14 UpperCamelCase : Dict = 1280 UpperCamelCase : str = 5120 UpperCamelCase : Any = 32 UpperCamelCase : Any = 16 UpperCamelCase : Optional[Any] = ViTMAEForPreTraining(_lowerCAmelCase ) UpperCamelCase : Dict = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["model"] UpperCamelCase : Dict = ViTMAEImageProcessor(size=config.image_size ) UpperCamelCase : Any = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) model.eval() UpperCamelCase : str = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg" UpperCamelCase : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) UpperCamelCase : List[Any] = ViTMAEImageProcessor(size=config.image_size ) UpperCamelCase : Union[str, Any] = image_processor(images=_lowerCAmelCase , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) UpperCamelCase : str = model(**_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = outputs.logits if "large" in checkpoint_url: UpperCamelCase : Optional[int] = torch.tensor( [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] ) elif "huge" in checkpoint_url: UpperCamelCase : Optional[Any] = torch.tensor( [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] ) else: UpperCamelCase : int = torch.tensor( [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __lowerCamelCase : Optional[int] = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
52
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowerCAmelCase : Tuple = logging.get_logger(__name__) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: with open(snake_case__ , """rb""" ) as flax_state_f: lowerCamelCase = from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCamelCase = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) lowerCamelCase = """""" lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" ) lowerCamelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCamelCase = [] lowerCamelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): lowerCamelCase = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCamelCase = """.""".join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict lowerCamelCase = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor lowerCamelCase = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list lowerCamelCase = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' """ use it for predictions and inference.""" ) return pt_model
291
0
'''simple docstring''' def lowercase__ ( __lowercase : str , __lowercase : int ) -> list: """simple docstring""" __UpperCamelCase = word.split() def justify(__lowercase : list , __lowercase : int , __lowercase : int ) -> str: __UpperCamelCase = max_width - width __UpperCamelCase = len(__lowercase ) if len(__lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: __UpperCamelCase = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] __UpperCamelCase = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] __UpperCamelCase = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(__lowercase ): num_spaces_between_words_list[i] += 1 __UpperCamelCase = [] for i in range(__lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ' ' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(__lowercase ) __UpperCamelCase = [] __UpperCamelCase = [] __UpperCamelCase = 0 for word in words: if width + len(__lowercase ) + len(__lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(__lowercase ) width += len(__lowercase ) else: # justify the line and add it to result answer.append(justify(__lowercase , __lowercase , __lowercase ) ) # reset new line and new width __UpperCamelCase , __UpperCamelCase = [word], len(__lowercase ) __UpperCamelCase = max_width - width - len(__lowercase ) answer.append(' '.join(__lowercase ) + (remaining_spaces + 1) * ' ' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
53
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCAmelCase : int = None lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", }, """tokenizer_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""", }, } lowerCAmelCase : Optional[int] = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } lowerCAmelCase : Union[str, Any] = """▁""" # Segments (not really needed) lowerCAmelCase : str = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : Tuple = 2 lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : List[Any] = 4 class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = "left" __UpperCamelCase = XLNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ): """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) lowerCamelCase = 3 lowerCamelCase = do_lower_case lowerCamelCase = remove_space lowerCamelCase = keep_accents lowerCamelCase = vocab_file lowerCamelCase = False if not self.vocab_file else True def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
291
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device a__ : Tuple = False class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" pass @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( image=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images __SCREAMING_SNAKE_CASE = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __SCREAMING_SNAKE_CASE = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
54
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __magic_name__ ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" lowerCamelCase = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) lowerCamelCase = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) lowerCamelCase = 3 lowerCamelCase = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) lowerCamelCase = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) lowerCamelCase = generator.model.config.eos_token_id lowerCamelCase = """<pad>""" lowerCamelCase = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
291
0
'''simple docstring''' def __snake_case ( UpperCAmelCase_ : int = 1000 ): return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
55
"""simple docstring""" def a__ ( snake_case__ , snake_case__ = False ) -> str: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected string as input, found {type(snake_case__ )}' raise ValueError(snake_case__ ) if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected boolean as use_pascal parameter, found {type(snake_case__ )}' raise ValueError(snake_case__ ) lowerCamelCase = input_str.split("""_""" ) lowerCamelCase = 0 if use_pascal else 1 lowerCamelCase = words[start_index:] lowerCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize] lowerCamelCase = """""" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
'''simple docstring''' from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(_lowerCamelCase ) class a ( _lowerCamelCase ): def __init__( self : Any , **lowercase_ : Any ): super().__init__(**lowercase_ ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self : Dict , lowercase_ : Union[np.ndarray, bytes, str] , **lowercase_ : Dict ): return super().__call__(lowercase_ , **lowercase_ ) def A_ ( self : Union[str, Any] , **lowercase_ : List[str] ): snake_case_ = {} if "candidate_labels" in kwargs: snake_case_ = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: snake_case_ = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def A_ ( self : str , lowercase_ : List[str] , lowercase_ : Optional[int]=None , lowercase_ : Tuple="This is a sound of {}." ): if isinstance(lowercase_ , lowercase_ ): if audio.startswith('''http://''' ) or audio.startswith('''https://''' ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png snake_case_ = requests.get(lowercase_ ).content else: with open(lowercase_ , '''rb''' ) as f: snake_case_ = f.read() if isinstance(lowercase_ , lowercase_ ): snake_case_ = ffmpeg_read(lowercase_ , self.feature_extractor.sampling_rate ) if not isinstance(lowercase_ , np.ndarray ): raise ValueError('''We expect a numpy ndarray as input''' ) if len(audio.shape ) != 1: raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' ) snake_case_ = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' ) snake_case_ = candidate_labels snake_case_ = [hypothesis_template.format(lowercase_ ) for x in candidate_labels] snake_case_ = self.tokenizer(lowercase_ , return_tensors=self.framework , padding=lowercase_ ) snake_case_ = [text_inputs] return inputs def A_ ( self : int , lowercase_ : str ): snake_case_ = model_inputs.pop('''candidate_labels''' ) snake_case_ = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , lowercase_ ): snake_case_ = text_inputs[0] else: # Batching case. snake_case_ = text_inputs[0][0] snake_case_ = self.model(**lowercase_ , **lowercase_ ) snake_case_ = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_audio, } return model_outputs def A_ ( self : Any , lowercase_ : int ): snake_case_ = model_outputs.pop('''candidate_labels''' ) snake_case_ = model_outputs['''logits'''][0] if self.framework == "pt": snake_case_ = logits.softmax(dim=0 ) snake_case_ = probs.tolist() else: raise ValueError('''`tf` framework not supported.''' ) snake_case_ = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(lowercase_ , lowercase_ ) , key=lambda lowercase_ : -x[0] ) ] return result
56
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase : int = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ): """simple docstring""" super().__init__(**_a ) lowerCamelCase = size if size is not None else {"""shortest_edge""": 256} lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = resample lowerCamelCase = do_center_crop lowerCamelCase = crop_size lowerCamelCase = do_rescale lowerCamelCase = rescale_factor lowerCamelCase = do_normalize lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) lowerCamelCase = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a ): """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a , _a = None , **_a , ): """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ): """simple docstring""" lowerCamelCase = do_resize if do_resize is not None else self.do_resize lowerCamelCase = size if size is not None else self.size lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = resample if resample is not None else self.resample lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase = crop_size if crop_size is not None else self.crop_size lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase = image_mean if image_mean is not None else self.image_mean lowerCamelCase = image_std if image_std is not None else self.image_std lowerCamelCase = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase = [to_numpy_array(_a ) for image in images] if do_resize: lowerCamelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: lowerCamelCase = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: lowerCamelCase = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCamelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCamelCase = [to_channel_dimension_format(_a , _a ) for image in images] lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a ) def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_a ): lowerCamelCase = target_sizes.numpy() lowerCamelCase = [] for idx in range(len(_a ) ): lowerCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a ) lowerCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: lowerCamelCase = logits.argmax(dim=1 ) lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
291
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCamelCase : '''simple docstring''' @staticmethod def snake_case ( *__a , **__a ): pass @is_pipeline_test @require_torch @require_vision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def snake_case ( self , __a , __a , __a ): __lowerCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) __lowerCAmelCase = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def snake_case ( self , __a , __a ): __lowerCAmelCase = vqa_pipeline(__a , top_k=1 ) self.assertEqual( __a , [ [{"score": ANY(__a ), "answer": ANY(__a )}], [{"score": ANY(__a ), "answer": ANY(__a )}], ] , ) @require_torch def snake_case ( self ): __lowerCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) __lowerCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowerCAmelCase = "How many cats are there?" __lowerCAmelCase = vqa_pipeline(image=__a , question="How many cats are there?" , top_k=2 ) self.assertEqual( __a , [{"score": ANY(__a ), "answer": ANY(__a )}, {"score": ANY(__a ), "answer": ANY(__a )}] ) __lowerCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( __a , [{"score": ANY(__a ), "answer": ANY(__a )}, {"score": ANY(__a ), "answer": ANY(__a )}] ) @slow @require_torch def snake_case ( self ): __lowerCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) __lowerCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowerCAmelCase = "How many cats are there?" __lowerCAmelCase = vqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] ) __lowerCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] ) __lowerCAmelCase = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def snake_case ( self ): pass
57
"""simple docstring""" import operator as op lowerCAmelCase : Dict = """scaler.pt""" lowerCAmelCase : Tuple = """pytorch_model""" lowerCAmelCase : Union[str, Any] = """random_states""" lowerCAmelCase : Union[str, Any] = """optimizer""" lowerCAmelCase : Dict = """scheduler""" lowerCAmelCase : int = """pytorch_model.bin""" lowerCAmelCase : str = """pytorch_model.bin.index.json""" lowerCAmelCase : Union[str, Any] = """model.safetensors""" lowerCAmelCase : List[Any] = """model.safetensors.index.json""" lowerCAmelCase : List[Any] = """1.10.2""" lowerCAmelCase : Any = """py38""" lowerCAmelCase : Optional[int] = """4.17.0""" lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""] lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""] lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""] lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""] lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""] lowerCAmelCase : Any = """2.0.1""" lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""] lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""] lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowerCAmelCase : Union[str, Any] = [ """nnodes""", """nproc_per_node""", """rdzv_backend""", """rdzv_endpoint""", """rdzv_id""", """rdzv_conf""", """standalone""", """max_restarts""", """monitor_interval""", """start_method""", """role""", """module""", """m""", """no_python""", """run_path""", """log_dir""", """r""", """redirects""", """t""", """tee""", """node_rank""", """master_addr""", """master_port""", ] lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""] lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
291
0
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowercase_ = { """sample_size""": 32, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1_000, """block_out_channels""": [32, 64], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowercase_ = { """sample_size""": 64, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1_000, """block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowercase_ = { """sample_size""": 256, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowercase_ = { """num_train_timesteps""": 40, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } lowercase_ = { """num_train_timesteps""": 201, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } lowercase_ = { """num_train_timesteps""": 151, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } def lowerCamelCase ( __lowerCamelCase : Dict ) ->Optional[int]: if isinstance(__lowerCamelCase , __lowerCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("""boolean value expected""" ) def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int]=False ) ->List[Any]: _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.0.weight'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.0.bias'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.2.weight'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.2.bias'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.emb_layers.1.weight'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.emb_layers.1.bias'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.0.weight'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.0.bias'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.3.weight'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.3.bias'] if has_skip: _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.skip_connection.weight'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.skip_connection.bias'] return new_checkpoint def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=None ) ->int: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 ) _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.norm.weight'] _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.norm.bias'] _SCREAMING_SNAKE_CASE = weight_q.squeeze(-1 ).squeeze(-1 ) _SCREAMING_SNAKE_CASE = bias_q.squeeze(-1 ).squeeze(-1 ) _SCREAMING_SNAKE_CASE = weight_k.squeeze(-1 ).squeeze(-1 ) _SCREAMING_SNAKE_CASE = bias_k.squeeze(-1 ).squeeze(-1 ) _SCREAMING_SNAKE_CASE = weight_v.squeeze(-1 ).squeeze(-1 ) _SCREAMING_SNAKE_CASE = bias_v.squeeze(-1 ).squeeze(-1 ) _SCREAMING_SNAKE_CASE = ( checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 ) ) _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Tuple ) ->List[str]: _SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase , map_location="""cpu""" ) _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = checkpoint["""time_embed.0.weight"""] _SCREAMING_SNAKE_CASE = checkpoint["""time_embed.0.bias"""] _SCREAMING_SNAKE_CASE = checkpoint["""time_embed.2.weight"""] _SCREAMING_SNAKE_CASE = checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: _SCREAMING_SNAKE_CASE = checkpoint["""label_emb.weight"""] _SCREAMING_SNAKE_CASE = checkpoint["""input_blocks.0.0.weight"""] _SCREAMING_SNAKE_CASE = checkpoint["""input_blocks.0.0.bias"""] _SCREAMING_SNAKE_CASE = unet_config["""down_block_types"""] _SCREAMING_SNAKE_CASE = unet_config["""layers_per_block"""] _SCREAMING_SNAKE_CASE = unet_config["""attention_head_dim"""] _SCREAMING_SNAKE_CASE = unet_config["""block_out_channels"""] _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = channels_list[0] for i, layer_type in enumerate(__lowerCamelCase ): _SCREAMING_SNAKE_CASE = channels_list[i] _SCREAMING_SNAKE_CASE = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__lowerCamelCase ): _SCREAMING_SNAKE_CASE = F'down_blocks.{i}.resnets.{j}' _SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0' _SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(__lowerCamelCase ): _SCREAMING_SNAKE_CASE = F'down_blocks.{i}.resnets.{j}' _SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0' _SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = F'down_blocks.{i}.attentions.{j}' _SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.1' _SCREAMING_SNAKE_CASE = convert_attention( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) current_layer += 1 if i != len(__lowerCamelCase ) - 1: _SCREAMING_SNAKE_CASE = F'down_blocks.{i}.downsamplers.0' _SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0' _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) current_layer += 1 _SCREAMING_SNAKE_CASE = current_channels # hardcoded the mid-block for now _SCREAMING_SNAKE_CASE = """mid_block.resnets.0""" _SCREAMING_SNAKE_CASE = """middle_block.0""" _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = """mid_block.attentions.0""" _SCREAMING_SNAKE_CASE = """middle_block.1""" _SCREAMING_SNAKE_CASE = convert_attention(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = """mid_block.resnets.1""" _SCREAMING_SNAKE_CASE = """middle_block.2""" _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = unet_config["""up_block_types"""] for i, layer_type in enumerate(__lowerCamelCase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _SCREAMING_SNAKE_CASE = F'up_blocks.{i}.resnets.{j}' _SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.0' _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase ) current_layer += 1 if i != len(__lowerCamelCase ) - 1: _SCREAMING_SNAKE_CASE = F'up_blocks.{i}.upsamplers.0' _SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer-1}.1' _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _SCREAMING_SNAKE_CASE = F'up_blocks.{i}.resnets.{j}' _SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.0' _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = F'up_blocks.{i}.attentions.{j}' _SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.1' _SCREAMING_SNAKE_CASE = convert_attention( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) current_layer += 1 if i != len(__lowerCamelCase ) - 1: _SCREAMING_SNAKE_CASE = F'up_blocks.{i}.upsamplers.0' _SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer-1}.2' _SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = checkpoint["""out.0.weight"""] _SCREAMING_SNAKE_CASE = checkpoint["""out.0.bias"""] _SCREAMING_SNAKE_CASE = checkpoint["""out.2.weight"""] _SCREAMING_SNAKE_CASE = checkpoint["""out.2.bias"""] return new_checkpoint if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") lowercase_ = parser.parse_args() lowercase_ = strabool(args.class_cond) lowercase_ = os.path.basename(args.unet_path) print(f"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: lowercase_ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowercase_ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowercase_ = TEST_UNET_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: lowercase_ = None lowercase_ = con_pt_to_diffuser(args.unet_path, unet_config) lowercase_ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowercase_ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowercase_ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowercase_ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") lowercase_ = CMStochasticIterativeScheduler(**scheduler_config) lowercase_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
58
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = image_size lowerCamelCase = patch_size lowerCamelCase = num_channels lowerCamelCase = is_training lowerCamelCase = use_labels lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase = (image_size // patch_size) ** 2 lowerCamelCase = num_patches + 1 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase = None if self.use_labels: lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ): """simple docstring""" return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = ViTMSNModel(config=_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = self.type_sequence_label_size lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a , labels=_a ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase = 1 lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ViTMSNModelTester(self ) lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def _lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def _lowerCAmelCase ( self ): """simple docstring""" pass def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a , nn.Linear ) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase = [*signature.parameters.keys()] lowerCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase = ViTMSNModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def a__ ( ) -> Any: lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def _lowerCAmelCase ( self ): """simple docstring""" torch.manual_seed(2 ) lowerCamelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a ) lowerCamelCase = self.default_image_processor lowerCamelCase = prepare_img() lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" ).to(_a ) # forward pass with torch.no_grad(): lowerCamelCase = model(**_a ) # verify the logits lowerCamelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCamelCase = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
291
0
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class UpperCAmelCase : def __init__(self : List[str] , snake_case__ : str = "cpu" , snake_case__ : str = "openai/clip-vit-large-patch14" ) -> None: '''simple docstring''' snake_case : int = device snake_case : str = CLIPTokenizerFast.from_pretrained(snake_case__ ) snake_case : List[Any] = [0.48145466, 0.4578275, 0.40821073] snake_case : Optional[int] = [0.26862954, 0.26130258, 0.27577711] snake_case : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std ) snake_case : List[str] = torchvision.transforms.Resize(2_24 ) snake_case : List[Any] = torchvision.transforms.CenterCrop(2_24 ) def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' snake_case : List[str] = self.resize(snake_case__ ) snake_case : Union[str, Any] = self.center_crop(snake_case__ ) snake_case : str = self.normalize(snake_case__ ) return images def __call__(self : Optional[int] , snake_case__ : Optional[int]=None , snake_case__ : List[Any]=None , **snake_case__ : int ) -> List[Any]: '''simple docstring''' snake_case : Dict = self.tokenizer(text=snake_case__ , **snake_case__ ) snake_case : str = self.preprocess_img(snake_case__ ) snake_case : Any = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class UpperCAmelCase ( nn.Module ): def __init__(self : int , snake_case__ : Tuple=10 , snake_case__ : List[str]=0.01 , snake_case__ : Dict=None , snake_case__ : List[str]=None , snake_case__ : Dict=None , snake_case__ : Optional[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None , snake_case__ : Any=False , snake_case__ : Optional[Any]=True , snake_case__ : Any="image" , snake_case__ : str=True , snake_case__ : Any=False , snake_case__ : List[str]=False , snake_case__ : Optional[Any]=False , ) -> None: '''simple docstring''' super().__init__() snake_case : List[str] = None snake_case : Optional[int] = device if device else get_device() if vqgan: snake_case : List[Any] = vqgan else: snake_case : Tuple = load_vqgan(self.device , conf_path=snake_case__ , ckpt_path=snake_case__ ) self.vqgan.eval() if clip: snake_case : Any = clip else: snake_case : Optional[Any] = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" ) self.clip.to(self.device ) snake_case : Optional[Any] = ProcessorGradientFlow(device=self.device ) snake_case : Optional[int] = iterations snake_case : str = lr snake_case : List[Any] = log snake_case : Optional[int] = make_grid snake_case : str = return_val snake_case : List[Any] = quantize snake_case : List[str] = self.vqgan.decoder.z_shape def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Tuple=None , snake_case__ : Any=5 , snake_case__ : Optional[Any]=True ) -> Any: '''simple docstring''' snake_case : List[Any] = [] if output_path is None: snake_case : List[Any] = "./animation.gif" if input_path is None: snake_case : Any = self.save_path snake_case : int = sorted(glob(input_path + "/*" ) ) if not len(snake_case__ ): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)" ) if len(snake_case__ ) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" ) snake_case : List[Any] = total_duration / len(snake_case__ ) snake_case : Union[str, Any] = [frame_duration] * len(snake_case__ ) if extend_frames: snake_case : Union[str, Any] = 1.5 snake_case : int = 3 for file_name in paths: if file_name.endswith(".png" ): images.append(imageio.imread(snake_case__ ) ) imageio.mimsave(snake_case__ , snake_case__ , duration=snake_case__ ) print(f"""gif saved to {output_path}""" ) def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str=None , snake_case__ : List[Any]=None ) -> Union[str, Any]: '''simple docstring''' if not (path or img): raise ValueError("Input either path or tensor" ) if img is not None: raise NotImplementedError snake_case : Optional[Any] = preprocess(Image.open(snake_case__ ) , target_image_size=2_56 ).to(self.device ) snake_case : Any = preprocess_vqgan(snake_case__ ) snake_case , *snake_case : str = self.vqgan.encode(snake_case__ ) return z def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Tuple ) -> Optional[Any]: '''simple docstring''' snake_case : Dict = self.latent.detach().requires_grad_() snake_case : Optional[int] = base_latent + transform_vector if self.quantize: snake_case , *snake_case : Any = self.vqgan.quantize(snake_case__ ) else: snake_case : Union[str, Any] = trans_latent return self.vqgan.decode(snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Optional[int]=None ) -> str: '''simple docstring''' snake_case : Any = self.clip_preprocessor(text=snake_case__ , images=snake_case__ , return_tensors="pt" , padding=snake_case__ ) snake_case : Optional[int] = self.clip(**snake_case__ ) snake_case : List[Any] = clip_outputs.logits_per_image if weights is not None: snake_case : Any = similarity_logits * weights return similarity_logits.sum() def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict ) -> Optional[Any]: '''simple docstring''' snake_case : Any = self._get_clip_similarity(pos_prompts["prompts"] , snake_case__ , weights=(1 / pos_prompts["weights"]) ) if neg_prompts: snake_case : Union[str, Any] = self._get_clip_similarity(neg_prompts["prompts"] , snake_case__ , weights=neg_prompts["weights"] ) else: snake_case : Union[str, Any] = torch.tensor([1] , device=self.device ) snake_case : List[Any] = -torch.log(snake_case__ ) + torch.log(snake_case__ ) return loss def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any] ) -> str: '''simple docstring''' snake_case : Union[str, Any] = torch.randn_like(self.latent , requires_grad=snake_case__ , device=self.device ) snake_case : Optional[int] = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() snake_case : Dict = self._add_vector(snake_case__ ) snake_case : Union[str, Any] = loop_post_process(snake_case__ ) snake_case : List[str] = self._get_CLIP_loss(snake_case__ , snake_case__ , snake_case__ ) print("CLIP loss" , snake_case__ ) if self.log: wandb.log({"CLIP Loss": clip_loss} ) clip_loss.backward(retain_graph=snake_case__ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ) -> List[Any]: '''simple docstring''' wandb.init(reinit=snake_case__ , project="face-editor" ) wandb.config.update({"Positive Prompts": positive_prompts} ) wandb.config.update({"Negative Prompts": negative_prompts} ) wandb.config.update({"lr": self.lr, "iterations": self.iterations} ) if image_path: snake_case : Optional[Any] = Image.open(snake_case__ ) snake_case : Dict = image.resize((2_56, 2_56) ) wandb.log("Original Image" , wandb.Image(snake_case__ ) ) def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if not prompts: return [] snake_case : Tuple = [] snake_case : List[str] = [] if isinstance(snake_case__ , snake_case__ ): snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|" )] for prompt in prompts: if isinstance(snake_case__ , (tuple, list) ): snake_case : Tuple = prompt[0] snake_case : Optional[int] = float(prompt[1] ) elif ":" in prompt: snake_case , snake_case : Optional[int] = prompt.split(":" ) snake_case : List[str] = float(snake_case__ ) else: snake_case : Optional[Any] = prompt snake_case : Tuple = 1.0 processed_prompts.append(snake_case__ ) weights.append(snake_case__ ) return { "prompts": processed_prompts, "weights": torch.tensor(snake_case__ , device=self.device ), } def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Dict , snake_case__ : Union[str, Any]=None , snake_case__ : str=None , snake_case__ : Union[str, Any]=True , snake_case__ : Any=False , snake_case__ : List[Any]=True , snake_case__ : List[Any]=True , snake_case__ : Dict=None , ) -> Any: '''simple docstring''' if image_path: snake_case : List[Any] = self._get_latent(snake_case__ ) else: snake_case : Optional[int] = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(snake_case__ , snake_case__ , snake_case__ ) assert pos_prompts, "You must provide at least one positive prompt." snake_case : Optional[Any] = self.process_prompts(snake_case__ ) snake_case : List[Any] = self.process_prompts(snake_case__ ) if save_final and save_path is None: snake_case : Tuple = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) ) if not os.path.exists(snake_case__ ): os.makedirs(snake_case__ ) else: snake_case : int = save_path + "_" + get_timestamp() os.makedirs(snake_case__ ) snake_case : Tuple = save_path snake_case : str = self.vqgan.decode(self.latent )[0] if show_intermediate: print("Original Image" ) show_pil(custom_to_pil(snake_case__ ) ) snake_case : List[str] = loop_post_process(snake_case__ ) for iter, transformed_img in enumerate(self._optimize_CLIP(snake_case__ , snake_case__ , snake_case__ ) ): if show_intermediate: show_pil(snake_case__ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) ) if self.log: wandb.log({"Image": wandb.Image(snake_case__ )} ) if show_final: show_pil(snake_case__ ) if save_final: transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
59
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]: lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]: if split_mlp_wi: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] lowerCamelCase = (wi_a, wi_a) else: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple: return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def a__ ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ) -> Dict: lowerCamelCase = traverse_util.flatten_dict(variables["""target"""] ) lowerCamelCase = {"""/""".join(snake_case__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCamelCase = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , snake_case__ ) lowerCamelCase = collections.OrderedDict() # Shared embeddings. lowerCamelCase = old["""token_embedder/embedding"""] # Encoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , snake_case__ , """encoder""" ).T lowerCamelCase = old["""encoder/encoder_norm/scale"""] if not scalable_attention: lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """encoder""" ).T lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (Cross Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 2 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T lowerCamelCase = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCamelCase = old["""decoder/logits_dense/kernel"""].T return new def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCamelCase = state_dict["""shared.weight"""] return state_dict def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = checkpoints.load_tax_checkpoint(snake_case__ ) lowerCamelCase = convert_tax_to_pytorch( snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ ) lowerCamelCase = make_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ , strict=snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ) -> str: lowerCamelCase = MTaConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowerCamelCase = UMTaEncoderModel(snake_case__ ) else: lowerCamelCase = UMTaForConditionalGeneration(snake_case__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(snake_case__ ) # Verify that we can load the checkpoint. model.from_pretrained(snake_case__ ) print("""Done""" ) if __name__ == "__main__": lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) lowerCAmelCase : int = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
291
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : int = logging.get_logger(__name__) snake_case__ : List[str] = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class snake_case_( a__ ): __UpperCamelCase = '''data2vec-vision''' def __init__( self : Any , UpperCamelCase_ : Any=7_6_8 , UpperCamelCase_ : Tuple=1_2 , UpperCamelCase_ : Any=1_2 , UpperCamelCase_ : List[str]=3_0_7_2 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : str=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : str=1E-12 , UpperCamelCase_ : Dict=2_2_4 , UpperCamelCase_ : List[str]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Any=True , UpperCamelCase_ : str=[3, 5, 7, 1_1] , UpperCamelCase_ : Dict=[1, 2, 3, 6] , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Any=0.4 , UpperCamelCase_ : str=2_5_6 , UpperCamelCase_ : Dict=1 , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : str=2_5_5 , **UpperCamelCase_ : Optional[Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = hidden_size lowerCAmelCase : Optional[int] = num_hidden_layers lowerCAmelCase : Optional[int] = num_attention_heads lowerCAmelCase : List[str] = intermediate_size lowerCAmelCase : str = hidden_act lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : Any = attention_probs_dropout_prob lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : str = layer_norm_eps lowerCAmelCase : List[Any] = image_size lowerCAmelCase : Dict = patch_size lowerCAmelCase : Tuple = num_channels lowerCAmelCase : List[Any] = use_mask_token lowerCAmelCase : List[str] = use_absolute_position_embeddings lowerCAmelCase : Optional[int] = use_relative_position_bias lowerCAmelCase : List[str] = use_shared_relative_position_bias lowerCAmelCase : Tuple = layer_scale_init_value lowerCAmelCase : int = drop_path_rate lowerCAmelCase : int = use_mean_pooling # decode head attributes (semantic segmentation) lowerCAmelCase : Optional[int] = out_indices lowerCAmelCase : Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) lowerCAmelCase : Tuple = use_auxiliary_head lowerCAmelCase : Optional[Any] = auxiliary_loss_weight lowerCAmelCase : List[Any] = auxiliary_channels lowerCAmelCase : Dict = auxiliary_num_convs lowerCAmelCase : List[str] = auxiliary_concat_input lowerCAmelCase : int = semantic_loss_ignore_index class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : str ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase__ ( self : str ): return 1E-4
60
"""simple docstring""" from __future__ import annotations def a__ ( snake_case__ , snake_case__ ) -> bool: if len(snake_case__ ) == 0: return False lowerCamelCase = len(snake_case__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , snake_case__ ) else: return binary_search(a_list[midpoint + 1 :] , snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip() lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")] lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip()) lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """ print(F"""{target} was {not_str}found in {sequence}""")
291
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _a = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class A_ (unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: SCREAMING_SNAKE_CASE__ : List[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: SCREAMING_SNAKE_CASE__ : int = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" ) UpperCAmelCase_ : Dict = text_classifier("This is great !" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) UpperCAmelCase_ : Dict = text_classifier("This is great !" , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] ) UpperCAmelCase_ : int = text_classifier(["This is great !", "This is bad"] , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ ) , [ [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], ] , ) UpperCAmelCase_ : List[Any] = text_classifier("This is great !" , top_k=1 ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) # Legacy behavior UpperCAmelCase_ : Union[str, Any] = text_classifier("This is great !" , return_all_scores=lowercase_ ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) UpperCAmelCase_ : Optional[Any] = text_classifier("This is great !" , return_all_scores=lowercase_ ) self.assertEqual( nested_simplify(lowercase_ ) , [[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] ) UpperCAmelCase_ : Optional[int] = text_classifier(["This is great !", "Something else"] , return_all_scores=lowercase_ ) self.assertEqual( nested_simplify(lowercase_ ) , [ [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], ] , ) UpperCAmelCase_ : Optional[int] = text_classifier(["This is great !", "Something else"] , return_all_scores=lowercase_ ) self.assertEqual( nested_simplify(lowercase_ ) , [ {"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_0", "score": 0.5_04}, ] , ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" import torch UpperCAmelCase_ : Optional[Any] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , ) UpperCAmelCase_ : Dict = text_classifier("This is great !" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) @require_tf def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" ) UpperCAmelCase_ : Optional[Any] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) @slow @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = pipeline("text-classification" ) UpperCAmelCase_ : Tuple = text_classifier("This is great !" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "POSITIVE", "score": 1.0}] ) UpperCAmelCase_ : int = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "NEGATIVE", "score": 1.0}] ) UpperCAmelCase_ : Optional[Any] = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "POSITIVE", "score": 0.9_88}] ) @slow @require_tf def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = pipeline("text-classification" , framework="tf" ) UpperCAmelCase_ : int = text_classifier("This is great !" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "POSITIVE", "score": 1.0}] ) UpperCAmelCase_ : List[Any] = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "NEGATIVE", "score": 1.0}] ) UpperCAmelCase_ : Union[str, Any] = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": "POSITIVE", "score": 0.9_88}] ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = TextClassificationPipeline(model=lowercase_ , tokenizer=lowercase_ ) return text_classifier, ["HuggingFace is in", "This is another test"] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 UpperCAmelCase_ : str = "HuggingFace is in" UpperCAmelCase_ : Dict = text_classifier(lowercase_ ) self.assertEqual(nested_simplify(lowercase_ ) , [{"label": ANY(lowercase_ ), "score": ANY(lowercase_ )}] ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) UpperCAmelCase_ : Any = ["HuggingFace is in ", "Paris is in France"] UpperCAmelCase_ : List[Any] = text_classifier(lowercase_ ) self.assertEqual( nested_simplify(lowercase_ ) , [{"label": ANY(lowercase_ ), "score": ANY(lowercase_ )}, {"label": ANY(lowercase_ ), "score": ANY(lowercase_ )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format UpperCAmelCase_ : Optional[int] = text_classifier(lowercase_ , top_k=lowercase_ ) UpperCAmelCase_ : int = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(lowercase_ ) , [[{"label": ANY(lowercase_ ), "score": ANY(lowercase_ )}] * N, [{"label": ANY(lowercase_ ), "score": ANY(lowercase_ )}] * N] , ) UpperCAmelCase_ : List[Any] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} UpperCAmelCase_ : int = text_classifier(lowercase_ ) self.assertEqual( nested_simplify(lowercase_ ) , {"label": ANY(lowercase_ ), "score": ANY(lowercase_ )} , ) self.assertTrue(outputs["label"] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. UpperCAmelCase_ : Dict = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(lowercase_ ): text_classifier(lowercase_ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility UpperCAmelCase_ : Optional[int] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] ) self.assertEqual( nested_simplify(lowercase_ ) , [{"label": ANY(lowercase_ ), "score": ANY(lowercase_ )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
61
"""simple docstring""" def a__ ( snake_case__ ) -> list: if len(snake_case__ ) < 2: return collection def circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) -> bool: lowerCamelCase = False if low == high: return swapped lowerCamelCase = low lowerCamelCase = high while left < right: if collection[left] > collection[right]: lowerCamelCase , lowerCamelCase = ( collection[right], collection[left], ) lowerCamelCase = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowerCamelCase , lowerCamelCase = ( collection[right + 1], collection[left], ) lowerCamelCase = True lowerCamelCase = low + int((high - low) / 2 ) lowerCamelCase = circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase = circle_sort_util(snake_case__ , mid + 1 , snake_case__ ) return swapped or left_swap or right_swap lowerCamelCase = True while is_not_sorted is True: lowerCamelCase = circle_sort_util(snake_case__ , 0 , len(snake_case__ ) - 1 ) return collection if __name__ == "__main__": lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(""",""")] print(circle_sort(unsorted))
291
0
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _A = logging.get_logger(__name__) # General docstring _A = 'ResNetConfig' # Base docstring _A = 'microsoft/resnet-50' _A = [1, 2048, 7, 7] # Image classification docstring _A = 'microsoft/resnet-50' _A = 'tiger cat' _A = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self , A_ , A_ , A_ = 3 , A_ = 1 , A_ = "relu" ) -> Optional[Any]: super().__init__() __UpperCamelCase =nn.Convad( A_ , A_ , kernel_size=A_ , stride=A_ , padding=kernel_size // 2 , bias=A_ ) __UpperCamelCase =nn.BatchNormad(A_ ) __UpperCamelCase =ACTaFN[activation] if activation is not None else nn.Identity() def _a ( self , A_ ) -> Tensor: __UpperCamelCase =self.convolution(A_ ) __UpperCamelCase =self.normalization(A_ ) __UpperCamelCase =self.activation(A_ ) return hidden_state class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self , A_ ) -> int: super().__init__() __UpperCamelCase =ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) __UpperCamelCase =nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) __UpperCamelCase =config.num_channels def _a ( self , A_ ) -> Tensor: __UpperCamelCase =pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) __UpperCamelCase =self.embedder(A_ ) __UpperCamelCase =self.pooler(A_ ) return embedding class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self , A_ , A_ , A_ = 2 ) -> Optional[int]: super().__init__() __UpperCamelCase =nn.Convad(A_ , A_ , kernel_size=1 , stride=A_ , bias=A_ ) __UpperCamelCase =nn.BatchNormad(A_ ) def _a ( self , A_ ) -> Tensor: __UpperCamelCase =self.convolution(A_ ) __UpperCamelCase =self.normalization(A_ ) return hidden_state class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self , A_ , A_ , A_ = 1 , A_ = "relu" ) -> Tuple: super().__init__() __UpperCamelCase =in_channels != out_channels or stride != 1 __UpperCamelCase =( ResNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity() ) __UpperCamelCase =nn.Sequential( ResNetConvLayer(A_ , A_ , stride=A_ ) , ResNetConvLayer(A_ , A_ , activation=A_ ) , ) __UpperCamelCase =ACTaFN[activation] def _a ( self , A_ ) -> List[Any]: __UpperCamelCase =hidden_state __UpperCamelCase =self.layer(A_ ) __UpperCamelCase =self.shortcut(A_ ) hidden_state += residual __UpperCamelCase =self.activation(A_ ) return hidden_state class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self , A_ , A_ , A_ = 1 , A_ = "relu" , A_ = 4 ) -> Tuple: super().__init__() __UpperCamelCase =in_channels != out_channels or stride != 1 __UpperCamelCase =out_channels // reduction __UpperCamelCase =( ResNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity() ) __UpperCamelCase =nn.Sequential( ResNetConvLayer(A_ , A_ , kernel_size=1 ) , ResNetConvLayer(A_ , A_ , stride=A_ ) , ResNetConvLayer(A_ , A_ , kernel_size=1 , activation=A_ ) , ) __UpperCamelCase =ACTaFN[activation] def _a ( self , A_ ) -> int: __UpperCamelCase =hidden_state __UpperCamelCase =self.layer(A_ ) __UpperCamelCase =self.shortcut(A_ ) hidden_state += residual __UpperCamelCase =self.activation(A_ ) return hidden_state class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , ) -> int: super().__init__() __UpperCamelCase =ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer __UpperCamelCase =nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(A_ , A_ , stride=A_ , activation=config.hidden_act ) , *[layer(A_ , A_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def _a ( self , A_ ) -> Tensor: __UpperCamelCase =input for layer in self.layers: __UpperCamelCase =layer(A_ ) return hidden_state class UpperCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self , A_ ) -> Any: super().__init__() __UpperCamelCase =nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __UpperCamelCase =zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(A_ , config.depths[1:] ): self.stages.append(ResNetStage(A_ , A_ , A_ , depth=A_ ) ) def _a ( self , A_ , A_ = False , A_ = True ) -> BaseModelOutputWithNoAttention: __UpperCamelCase =() if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __UpperCamelCase =hidden_states + (hidden_state,) __UpperCamelCase =stage_module(A_ ) if output_hidden_states: __UpperCamelCase =hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=A_ , hidden_states=A_ , ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : List[Any] = ResNetConfig UpperCAmelCase__ : Any = "resnet" UpperCAmelCase__ : Optional[int] = "pixel_values" UpperCAmelCase__ : List[str] = True def _a ( self , A_ ) -> str: if isinstance(A_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def _a ( self , A_ , A_=False ) -> str: if isinstance(A_ , A_ ): __UpperCamelCase =value _A = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _A = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top." , A_ , ) class UpperCAmelCase__ ( A_ ): """simple docstring""" def __init__( self , A_ ) -> List[Any]: super().__init__(A_ ) __UpperCamelCase =config __UpperCamelCase =ResNetEmbeddings(A_ ) __UpperCamelCase =ResNetEncoder(A_ ) __UpperCamelCase =nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _a ( self , A_ , A_ = None , A_ = None ) -> BaseModelOutputWithPoolingAndNoAttention: __UpperCamelCase =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase =self.embedder(A_ ) __UpperCamelCase =self.encoder( A_ , output_hidden_states=A_ , return_dict=A_ ) __UpperCamelCase =encoder_outputs[0] __UpperCamelCase =self.pooler(A_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A_ , pooler_output=A_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , A_ , ) class UpperCAmelCase__ ( A_ ): """simple docstring""" def __init__( self , A_ ) -> List[Any]: super().__init__(A_ ) __UpperCamelCase =config.num_labels __UpperCamelCase =ResNetModel(A_ ) # classification head __UpperCamelCase =nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _a ( self , A_ = None , A_ = None , A_ = None , A_ = None , ) -> ImageClassifierOutputWithNoAttention: __UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase =self.resnet(A_ , output_hidden_states=A_ , return_dict=A_ ) __UpperCamelCase =outputs.pooler_output if return_dict else outputs[1] __UpperCamelCase =self.classifier(A_ ) __UpperCamelCase =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __UpperCamelCase ='regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __UpperCamelCase ='single_label_classification' else: __UpperCamelCase ='multi_label_classification' if self.config.problem_type == "regression": __UpperCamelCase =MSELoss() if self.num_labels == 1: __UpperCamelCase =loss_fct(logits.squeeze() , labels.squeeze() ) else: __UpperCamelCase =loss_fct(A_ , A_ ) elif self.config.problem_type == "single_label_classification": __UpperCamelCase =CrossEntropyLoss() __UpperCamelCase =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __UpperCamelCase =BCEWithLogitsLoss() __UpperCamelCase =loss_fct(A_ , A_ ) if not return_dict: __UpperCamelCase =(logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( "\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , A_ , ) class UpperCAmelCase__ ( A_ , A_ ): """simple docstring""" def __init__( self , A_ ) -> List[Any]: super().__init__(A_ ) super()._init_backbone(A_ ) __UpperCamelCase =[config.embedding_size] + config.hidden_sizes __UpperCamelCase =ResNetEmbeddings(A_ ) __UpperCamelCase =ResNetEncoder(A_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A_ ) @replace_return_docstrings(output_type=A_ , config_class=_CONFIG_FOR_DOC ) def _a ( self , A_ , A_ = None , A_ = None ) -> BackboneOutput: __UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase =self.embedder(A_ ) __UpperCamelCase =self.encoder(A_ , output_hidden_states=A_ , return_dict=A_ ) __UpperCamelCase =outputs.hidden_states __UpperCamelCase =() for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: __UpperCamelCase =(feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=A_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=A_ , )
62
"""simple docstring""" from collections.abc import Generator def a__ ( ) -> Generator[int, None, None]: lowerCamelCase , lowerCamelCase = 0, 1 while True: lowerCamelCase , lowerCamelCase = b, a + b yield b def a__ ( snake_case__ = 10_00 ) -> int: lowerCamelCase = 1 lowerCamelCase = fibonacci_generator() while len(str(next(snake_case__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
291
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase_ : Optional[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='roberta' def __init__( self : Any , __a : List[Any]=5_02_65 , __a : Union[str, Any]=7_68 , __a : Optional[Any]=12 , __a : Any=12 , __a : Union[str, Any]=30_72 , __a : List[str]="gelu" , __a : Optional[int]=0.1 , __a : Optional[Any]=0.1 , __a : Optional[int]=5_12 , __a : Any=2 , __a : Dict=0.02 , __a : List[Any]=1e-1_2 , __a : Any=1 , __a : Optional[int]=0 , __a : int=2 , __a : List[str]="absolute" , __a : Dict=True , __a : Optional[int]=None , **__a : List[str] , ): super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = initializer_range _a = layer_norm_eps _a = position_embedding_type _a = use_cache _a = classifier_dropout class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property def UpperCamelCase__ ( self : Tuple ): if self.task == "multiple-choice": _a = {0: "batch", 1: "choice", 2: "sequence"} else: _a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
63
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase : List[str] = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ): """simple docstring""" super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) lowerCamelCase = spectrogram_length lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = feature_size // self.patch_size[1] lowerCamelCase = n_fft lowerCamelCase = sampling_rate // hop_length_to_sampling_rate lowerCamelCase = sampling_rate lowerCamelCase = padding_value lowerCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) lowerCamelCase = log_spec[:, :-1] lowerCamelCase = log_spec - 20.0 lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) lowerCamelCase = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCamelCase = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase = np.array(_a ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase = padded_audio_features * self.padding_value for i in range(len(_a ) ): lowerCamelCase = audio_features[i] lowerCamelCase = feature # return as BatchFeature if return_attention_mask: lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase = {"""audio_values""": padded_audio_features} lowerCamelCase = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
291
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig class lowercase( __a ): '''simple docstring''' lowercase__ = "bert-generation" def __init__( self: str, a_: str=50_358, a_: List[Any]=1_024, a_: Union[str, Any]=24, a_: int=16, a_: Union[str, Any]=4_096, a_: Union[str, Any]="gelu", a_: Tuple=0.1, a_: Any=0.1, a_: Union[str, Any]=512, a_: List[str]=0.02, a_: List[str]=1E-12, a_: Union[str, Any]=0, a_: Union[str, Any]=2, a_: List[Any]=1, a_: int="absolute", a_: Dict=True, **a_: List[Any], ): '''simple docstring''' super().__init__(pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_ ) _snake_case : Optional[int] = vocab_size _snake_case : List[Any] = hidden_size _snake_case : str = num_hidden_layers _snake_case : Tuple = num_attention_heads _snake_case : Union[str, Any] = hidden_act _snake_case : Optional[Any] = intermediate_size _snake_case : Tuple = hidden_dropout_prob _snake_case : int = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : List[str] = initializer_range _snake_case : Optional[Any] = layer_norm_eps _snake_case : List[Any] = position_embedding_type _snake_case : Tuple = use_cache
64
"""simple docstring""" from math import ceil def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = list(range(0 , snake_case__ ) ) lowerCamelCase = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowerCamelCase = [] for i in device_map_blocks: if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(snake_case__ ) # Missing blocks lowerCamelCase = [i for i in blocks if i not in device_map_blocks] lowerCamelCase = [i for i in device_map_blocks if i not in blocks] if len(snake_case__ ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(snake_case__ ) ) def a__ ( snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = list(range(snake_case__ ) ) lowerCamelCase = int(ceil(n_layers / len(snake_case__ ) ) ) lowerCamelCase = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )] return dict(zip(snake_case__ , snake_case__ ) )
291
0
def lowerCAmelCase_ ( __A ) -> str: '''simple docstring''' UpperCAmelCase__ = "" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def lowerCAmelCase_ ( __A ) -> dict[str, str]: '''simple docstring''' UpperCAmelCase__ = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key UpperCAmelCase__ = remove_duplicates(key.upper() ) UpperCAmelCase__ = len(__A ) # First fill cipher with key characters UpperCAmelCase__ = {alphabet[i]: char for i, char in enumerate(__A )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(__A ), 26 ): UpperCAmelCase__ = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 UpperCAmelCase__ = alphabet[i - offset] UpperCAmelCase__ = char return cipher_alphabet def lowerCAmelCase_ ( __A, __A ) -> str: '''simple docstring''' return "".join(cipher_map.get(__A, __A ) for ch in message.upper() ) def lowerCAmelCase_ ( __A, __A ) -> str: '''simple docstring''' UpperCAmelCase__ = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(__A, __A ) for ch in message.upper() ) def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase__ = input("Enter message to encode or decode: " ).strip() UpperCAmelCase__ = input("Enter keyword: " ).strip() UpperCAmelCase__ = input("Encipher or decipher? E/D:" ).strip()[0].lower() try: UpperCAmelCase__ = {"e": encipher, "d": decipher}[option] except KeyError: raise KeyError("invalid input option" ) UpperCAmelCase__ = create_cipher_map(__A ) print(func(__A, __A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
65
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = seq_length lowerCamelCase = is_training lowerCamelCase = use_attention_mask lowerCamelCase = use_token_type_ids lowerCamelCase = use_labels lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = type_vocab_size lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = num_choices def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase = None if self.use_attention_mask: lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase = None if self.use_token_type_ids: lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerModelTester(self ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a ) lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a ) @require_flax class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase = model(_a )[0] lowerCamelCase = 50_000 lowerCamelCase = (1, 6, vocab_size) self.assertEqual(output.shape , _a ) lowerCamelCase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
291
0
"""simple docstring""" __a = {} def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on snake_case_ :str = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one snake_case_ :Any = _calculate(days - 1, _lowercase, late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 snake_case_ :Optional[int] = _calculate(days - 1, absent + 1, 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter snake_case_ :str = _calculate(days - 1, _lowercase, 0 ) snake_case_ :Optional[Any] = state_late + state_absent + state_ontime snake_case_ :Optional[Any] = prizestrings return prizestrings def A_ ( _lowercase = 30 ): '''simple docstring''' return _calculate(_lowercase, absent=0, late=0 ) if __name__ == "__main__": print(solution())
66
"""simple docstring""" from typing import Any def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list: _validation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Creates data structures and fill initial step lowerCamelCase = {} lowerCamelCase = {} for state in states_space: lowerCamelCase = observations_space[0] lowerCamelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case__ ) ): lowerCamelCase = observations_space[o] lowerCamelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state # Update probabilities and pointers dicts lowerCamelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase = arg_max # The final observation lowerCamelCase = observations_space[len(snake_case__ ) - 1] # argmax for given final observation lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state lowerCamelCase = arg_max # Process pointers backwards lowerCamelCase = last_state lowerCamelCase = [] for o in range(len(snake_case__ ) - 1 , -1 , -1 ): result.append(snake_case__ ) lowerCamelCase = pointers[previous, observations_space[o]] result.reverse() return result def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_not_empty( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) _validate_lists(snake_case__ , snake_case__ ) _validate_dicts( snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_list(snake_case__ , """observations_space""" ) _validate_list(snake_case__ , """states_space""" ) def a__ ( snake_case__ , snake_case__ ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a list' raise ValueError(snake_case__ ) else: for x in _object: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'{var_name} must be a list of strings' raise ValueError(snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ ) _validate_nested_dict(snake_case__ , """transition_probabilities""" ) _validate_nested_dict(snake_case__ , """emission_probabilities""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_dict(_object , snake_case__ , snake_case__ ) for x in _object.values(): _validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a dict' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ): lowerCamelCase = F'{var_name} all keys must be strings' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ): lowerCamelCase = """nested dictionary """ if nested else """""" lowerCamelCase = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__="pt" ) -> List[Any]: __lowerCamelCase = {'''add_prefix_space''': True} if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not line.startswith(''' ''' ) else {} __lowerCamelCase = padding_side return tokenizer( [line] , max_length=UpperCamelCase__ , padding='''max_length''' if pad_to_max_length else None , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , ) -> Tuple: __lowerCamelCase = input_ids.ne(UpperCamelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class a__ ( UpperCAmelCase__ ): def __init__( self : Dict , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] , a : Optional[int]="train" , a : List[str]=None , a : List[str]=None , a : List[str]=None , a : Dict="" , ): """simple docstring""" super().__init__() __lowerCamelCase = Path(a ).joinpath(type_path + '''.source''' ) __lowerCamelCase = Path(a ).joinpath(type_path + '''.target''' ) __lowerCamelCase = self.get_char_lens(self.src_file ) __lowerCamelCase = max_source_length __lowerCamelCase = max_target_length assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}""" __lowerCamelCase = tokenizer __lowerCamelCase = prefix if n_obs is not None: __lowerCamelCase = self.src_lens[:n_obs] __lowerCamelCase = src_lang __lowerCamelCase = tgt_lang def __len__( self : Dict ): """simple docstring""" return len(self.src_lens ) def __getitem__( self : int , a : Any ): """simple docstring""" __lowerCamelCase = index + 1 # linecache starts at 1 __lowerCamelCase = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip('''\n''' ) __lowerCamelCase = linecache.getline(str(self.tgt_file ) , a ).rstrip('''\n''' ) assert source_line, f"""empty source line for index {index}""" assert tgt_line, f"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , a ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __lowerCamelCase = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer ) __lowerCamelCase = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer __lowerCamelCase = encode_line(a , a , self.max_source_length , '''right''' ) __lowerCamelCase = encode_line(a , a , self.max_target_length , '''right''' ) __lowerCamelCase = source_inputs['''input_ids'''].squeeze() __lowerCamelCase = target_inputs['''input_ids'''].squeeze() __lowerCamelCase = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def SCREAMING_SNAKE_CASE__ ( a : Any ): """simple docstring""" return [len(a ) for x in Path(a ).open().readlines()] def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[Any] ): """simple docstring""" __lowerCamelCase = torch.stack([x['''input_ids'''] for x in batch] ) __lowerCamelCase = torch.stack([x['''attention_mask'''] for x in batch] ) __lowerCamelCase = torch.stack([x['''decoder_input_ids'''] for x in batch] ) __lowerCamelCase = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , a ) else self.tokenizer.pad_token_id ) __lowerCamelCase = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , a ) else self.tokenizer.pad_token_id ) __lowerCamelCase = trim_batch(a , a ) __lowerCamelCase , __lowerCamelCase = trim_batch(a , a , attention_mask=a ) __lowerCamelCase = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch __UpperCAmelCase =getLogger(__name__) def __lowerCAmelCase ( UpperCamelCase__ ) -> Dict: return list(itertools.chain.from_iterable(UpperCamelCase__ ) ) def __lowerCAmelCase ( UpperCamelCase__ ) -> None: __lowerCamelCase = get_git_info() save_json(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''git_log.json''' ) ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=4 , **UpperCamelCase__ ) -> Tuple: with open(UpperCamelCase__ , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ , indent=UpperCamelCase__ , **UpperCamelCase__ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]: with open(UpperCamelCase__ ) as f: return json.load(UpperCamelCase__ ) def __lowerCAmelCase ( ) -> Union[str, Any]: __lowerCamelCase = git.Repo(search_parent_directories=UpperCamelCase__ ) __lowerCamelCase = { '''repo_id''': str(UpperCamelCase__ ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List: return list(map(UpperCamelCase__ , UpperCamelCase__ ) ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: with open(UpperCamelCase__ , '''wb''' ) as f: return pickle.dump(UpperCamelCase__ , UpperCamelCase__ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Dict: def remove_articles(UpperCamelCase__ ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , UpperCamelCase__ ) def white_space_fix(UpperCamelCase__ ): return " ".join(text.split() ) def remove_punc(UpperCamelCase__ ): __lowerCamelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCamelCase__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str: __lowerCamelCase = normalize_answer(UpperCamelCase__ ).split() __lowerCamelCase = normalize_answer(UpperCamelCase__ ).split() __lowerCamelCase = Counter(UpperCamelCase__ ) & Counter(UpperCamelCase__ ) __lowerCamelCase = sum(common.values() ) if num_same == 0: return 0 __lowerCamelCase = 1.0 * num_same / len(UpperCamelCase__ ) __lowerCamelCase = 1.0 * num_same / len(UpperCamelCase__ ) __lowerCamelCase = (2 * precision * recall) / (precision + recall) return fa def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: return normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) __lowerCamelCase = 0 for hypo, pred in zip(UpperCamelCase__ , UpperCamelCase__ ): em += exact_match_score(UpperCamelCase__ , UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: em /= len(UpperCamelCase__ ) return {"em": em} def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple: return model_prefix.startswith('''rag''' ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: __lowerCamelCase = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __lowerCamelCase = '''dropout_rate''' for p in extra_params: if getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and not hasattr(UpperCamelCase__ , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(UpperCamelCase__ ) ) delattr(UpperCamelCase__ , UpperCamelCase__ ) continue __lowerCamelCase = p if hasattr(UpperCamelCase__ , UpperCamelCase__ ) else equivalent_param[p] setattr(UpperCamelCase__ , UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) ) delattr(UpperCamelCase__ , UpperCamelCase__ ) return hparams, config
67
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger(__name__) def a__ ( snake_case__ ) -> Dict: lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" ) if "model" in sd.keys(): lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # pop unnecessary weights lowerCamelCase = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(snake_case__ ) lowerCamelCase = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowerCamelCase = sd.pop(snake_case__ ) lowerCamelCase = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowerCamelCase = sd[key] # We split QKV in separate Q,K,V lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" ) lowerCamelCase = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowerCamelCase , lowerCamelCase , lowerCamelCase = torch.split(snake_case__ , depth // 3 , dim=0 ) lowerCamelCase = q lowerCamelCase = k lowerCamelCase = v del sd[key] return sd @torch.no_grad() def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple: lowerCamelCase = load_checkpoint(snake_case__ ) if config is not None: lowerCamelCase = OPTConfig.from_pretrained(snake_case__ ) else: lowerCamelCase = OPTConfig() lowerCamelCase = OPTModel(snake_case__ ).half().eval() model.load_state_dict(snake_case__ ) # Check results Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") lowerCAmelCase : Optional[Any] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
291
0
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=32 , lowercase=3 , lowercase=4 , lowercase=[10, 20, 30, 40] , lowercase=[2, 2, 3, 2] , lowercase=True , lowercase=True , lowercase=37 , lowercase="gelu" , lowercase=10 , lowercase=0.02 , lowercase=["stage2", "stage3", "stage4"] , lowercase=[2, 3, 4] , lowercase=None , ) -> List[str]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_stages A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = intermediate_size A__ = hidden_act A__ = num_labels A__ = initializer_range A__ = out_features A__ = out_indices A__ = scope def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = ConvNextModel(config=lowercase ) model.to(lowercase ) model.eval() A__ = model(lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' A__ = ConvNextForImageClassification(lowercase ) model.to(lowercase ) model.eval() A__ = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' A__ = ConvNextBackbone(config=lowercase ) model.to(lowercase ) model.eval() A__ = model(lowercase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A__ = None A__ = ConvNextBackbone(config=lowercase ) model.to(lowercase ) model.eval() A__ = model(lowercase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) __lowerCamelCase = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = ConvNextModelTester(self ) A__ = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' return @unittest.skip(reason="ConvNext does not use inputs_embeds" ) def UpperCamelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason="ConvNext does not support input and output embeddings" ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' pass @unittest.skip(reason="ConvNext does not use feedforward chunking" ) def UpperCamelCase ( self ) -> str: '''simple docstring''' pass def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' def check_hidden_states_output(lowercase , lowercase , lowercase ): A__ = model_class(lowercase ) model.to(lowercase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(lowercase , lowercase ) ) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(lowercase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(lowercase , lowercase , lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ConvNextModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def lowerCAmelCase__ ( ) -> Union[str, Any]: '''simple docstring''' A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(lowercase ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowercase , return_tensors="pt" ).to(lowercase ) # forward pass with torch.no_grad(): A__ = model(**lowercase ) # verify the logits A__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase ) A__ = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) ) @require_torch class a__ ( unittest.TestCase , snake_case ): """simple docstring""" __lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else () __lowerCamelCase = ConvNextConfig __lowerCamelCase = False def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = ConvNextModelTester(self )
68
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = tempfile.mkdtemp() # fmt: off lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCamelCase = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } lowerCamelCase = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_a , _a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_tokenizer() lowerCamelCase = self.get_image_processor() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCamelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = image_processor(_a , return_tensors="""np""" ) lowerCamelCase = processor(images=_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = processor(text=_a ) lowerCamelCase = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase = processor.batch_decode(_a ) lowerCamelCase = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
291
0
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str: if not (isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(UpperCAmelCase , UpperCAmelCase )): raise ValueError('longest_common_substring() takes two strings for inputs' ) snake_case_ = len(UpperCAmelCase ) snake_case_ = len(UpperCAmelCase ) snake_case_ = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] snake_case_ = 0 snake_case_ = 0 for i in range(1 , texta_length + 1 ): for j in range(1 , texta_length + 1 ): if texta[i - 1] == texta[j - 1]: snake_case_ = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: snake_case_ = i snake_case_ = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
69
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def a__ ( ) -> Union[str, Any]: lowerCamelCase = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def a__ ( ) -> List[str]: lowerCamelCase = parse_args() # Import training_script as a module. lowerCamelCase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCamelCase = script_fpath.stem lowerCamelCase = importlib.import_module(snake_case__ ) # Patch sys.argv lowerCamelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
291
0
'''simple docstring''' import os import string import sys A__ : str =1 << 8 A__ : Optional[int] ={ '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 27, '''up''': 65 + ARROW_KEY_FLAG, '''down''': 66 + ARROW_KEY_FLAG, '''right''': 67 + ARROW_KEY_FLAG, '''left''': 68 + ARROW_KEY_FLAG, '''mod_int''': 91, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 50, '''delete''': 51, '''pg_up''': 53, '''pg_down''': 54, } A__ : Optional[int] =KEYMAP['''up'''] A__ : Tuple =KEYMAP['''left'''] if sys.platform == "win32": A__ : int =[] A__ : int ={ b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(10): A__ : List[Any] =ord(str(i)) def UpperCamelCase__ ( ): """simple docstring""" if os.name == "nt": import msvcrt _lowerCAmelCase = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowerCAmelCase ) == 0: # Read the keystroke _lowerCAmelCase = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCAmelCase = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCAmelCase = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(lowerCAmelCase ) if ord(lowerCAmelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(1_26 ) ) _lowerCAmelCase = chr(KEYMAP["""esc"""] ) except KeyError: _lowerCAmelCase = cha[1] else: _lowerCAmelCase = ch.decode(lowerCAmelCase ) else: _lowerCAmelCase = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty _lowerCAmelCase = sys.stdin.fileno() _lowerCAmelCase = termios.tcgetattr(lowerCAmelCase ) try: tty.setraw(lowerCAmelCase ) _lowerCAmelCase = sys.stdin.read(1 ) finally: termios.tcsetattr(lowerCAmelCase , termios.TCSADRAIN , lowerCAmelCase ) return ch def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = get_raw_chars() if ord(lowerCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowerCAmelCase ) == KEYMAP["esc"]: _lowerCAmelCase = get_raw_chars() if ord(lowerCAmelCase ) == KEYMAP["mod_int"]: _lowerCAmelCase = get_raw_chars() if ord(lowerCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowerCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowerCAmelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
70
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : List[str] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "sew-d" def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a=2 , _a=512 , _a=256 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ): """simple docstring""" super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowerCamelCase = hidden_size lowerCamelCase = feat_extract_norm lowerCamelCase = feat_extract_activation lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = conv_bias lowerCamelCase = num_conv_pos_embeddings lowerCamelCase = num_conv_pos_embedding_groups lowerCamelCase = len(self.conv_dim ) lowerCamelCase = num_hidden_layers lowerCamelCase = intermediate_size lowerCamelCase = squeeze_factor lowerCamelCase = max_position_embeddings lowerCamelCase = position_buckets lowerCamelCase = share_att_key lowerCamelCase = relative_attention lowerCamelCase = norm_rel_ebd lowerCamelCase = list(_a ) lowerCamelCase = hidden_act lowerCamelCase = num_attention_heads lowerCamelCase = hidden_dropout lowerCamelCase = attention_dropout lowerCamelCase = activation_dropout lowerCamelCase = feat_proj_dropout lowerCamelCase = final_dropout lowerCamelCase = layer_norm_eps lowerCamelCase = feature_layer_norm_eps lowerCamelCase = initializer_range lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase = apply_spec_augment lowerCamelCase = mask_time_prob lowerCamelCase = mask_time_length lowerCamelCase = mask_time_min_masks lowerCamelCase = mask_feature_prob lowerCamelCase = mask_feature_length lowerCamelCase = mask_feature_min_masks # ctc loss lowerCamelCase = ctc_loss_reduction lowerCamelCase = ctc_zero_infinity # sequence classification lowerCamelCase = use_weighted_layer_sum lowerCamelCase = classifier_proj_size @property def _lowerCAmelCase ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
291
0
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A ( a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Optional[Any] =OpenAIGPTTokenizer UpperCamelCase__ : Any =OpenAIGPTTokenizerFast UpperCamelCase__ : Dict =True UpperCamelCase__ : List[str] =False def __lowercase ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase : Any =[ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __UpperCamelCase : Optional[Any] =dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) __UpperCamelCase : List[str] =['#version: 0.2', 'l o', 'lo w', 'e r</w>', ''] __UpperCamelCase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(lowerCamelCase__ ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(lowerCamelCase__ ) ) def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" return "lower newer", "lower newer" def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) __UpperCamelCase : Any ='lower' __UpperCamelCase : Any =['low', 'er</w>'] __UpperCamelCase : Optional[int] =tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) __UpperCamelCase : List[Any] =tokens + ['<unk>'] __UpperCamelCase : Optional[Any] =[14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__=15 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase : str =self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) # Simple input __UpperCamelCase : str ='This is a simple input' __UpperCamelCase : str =['This is a simple input 1', 'This is a simple input 2'] __UpperCamelCase : Union[str, Any] =('This is a simple input', 'This is a pair') __UpperCamelCase : List[Any] =[ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowerCamelCase__ , tokenizer_r.encode , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' ) # Simple input self.assertRaises(lowerCamelCase__ , tokenizer_r.encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' ) # Simple input self.assertRaises( lowerCamelCase__ , tokenizer_r.batch_encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' , ) # Pair input self.assertRaises(lowerCamelCase__ , tokenizer_r.encode , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' ) # Pair input self.assertRaises(lowerCamelCase__ , tokenizer_r.encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' ) # Pair input self.assertRaises( lowerCamelCase__ , tokenizer_r.batch_encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' , ) def __lowercase ( self ): """simple docstring""" pass @require_ftfy @require_spacy @require_tokenizers class __A ( a ): """simple docstring""" pass
71
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase : Any = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _lowerCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None , _a="warn" , ): """simple docstring""" lowerCamelCase = recall_score( _a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , ) return {"recall": float(_a ) if score.size == 1 else score}
291
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class __snake_case ( _lowercase): snake_case__ : List[Any] = "roberta" def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int]=5_0_2_6_5 , __lowerCAmelCase : List[str]=7_6_8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[Any]=1_2 , __lowerCAmelCase : Union[str, Any]=3_0_7_2 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Dict=5_1_2 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[Any]=1E-12 , __lowerCAmelCase : str=1 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Tuple="absolute" , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[Any] , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : List[str] = hidden_size _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : List[str] = num_attention_heads _lowerCamelCase : Dict = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Optional[Any] = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Any = type_vocab_size _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : Optional[Any] = layer_norm_eps _lowerCamelCase : int = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Any = classifier_dropout class __snake_case ( _lowercase): @property def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" if self.task == "multiple-choice": _lowerCamelCase : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _lowerCamelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
72
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = process lowerCamelCase = params def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" lowerCamelCase = self.dataset[i] lowerCamelCase = self.process(_a , **self.params ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" lowerCamelCase = loader lowerCamelCase = infer lowerCamelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase = None lowerCamelCase = loader_batch_size # Internal bookkeeping lowerCamelCase = None lowerCamelCase = None def __len__( self ): """simple docstring""" return len(self.loader ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_a , _a ): # Convert ModelOutput to tuple first lowerCamelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_a , _a ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowerCamelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase = self._loader_batch_data.__class__(_a ) self._loader_batch_index += 1 return result def _lowerCAmelCase ( self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase = next(self.iterator ) lowerCamelCase = self.infer(_a , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase = processed lowerCamelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" super().__init__(_a , _a , _a ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) lowerCamelCase = None return self def _lowerCAmelCase ( self ): """simple docstring""" if self.subiterator is None: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCamelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) lowerCamelCase = next(self.subiterator ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase = False lowerCamelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator while not is_last: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size lowerCamelCase = processed lowerCamelCase = 0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator else: lowerCamelCase = processed lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) return accumulator class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = key def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return self.dataset[i][self.key] class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = keya lowerCamelCase = keya def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
291
0
from __future__ import annotations from random import random class A_ : def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : int | None = None): __lowerCamelCase : Union[str, Any] = value __lowerCamelCase : int = random() __lowerCamelCase : Node | None = None __lowerCamelCase : Node | None = None def __repr__( self : List[Any]): from pprint import pformat if self.left is None and self.right is None: return F"'{self.value}: {self.prior:.5}'" else: return pformat( {F"{self.value}: {self.prior:.5}": (self.left, self.right)} ,indent=1) def __str__( self : Union[str, Any]): __lowerCamelCase : Optional[int] = str(self.value) + ' ' __lowerCamelCase : Optional[Any] = str(self.left or '') __lowerCamelCase : List[Any] = str(self.right or '') return value + left + right def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> tuple[Node | None, Node | None]: if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: __lowerCamelCase , __lowerCamelCase : Union[str, Any] = split(root.left , lowerCamelCase__ ) return left, root else: __lowerCamelCase , __lowerCamelCase : List[str] = split(root.right , lowerCamelCase__ ) return root, right def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: __lowerCamelCase : Optional[Any] = merge(left.right , lowerCamelCase__ ) return left else: __lowerCamelCase : List[str] = merge(lowerCamelCase__ , right.left ) return right def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: __lowerCamelCase : List[Any] = Node(lowerCamelCase__ ) __lowerCamelCase , __lowerCamelCase : Optional[int] = split(lowerCamelCase__ , lowerCamelCase__ ) return merge(merge(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: __lowerCamelCase , __lowerCamelCase : List[str] = split(lowerCamelCase__ , value - 1 ) __lowerCamelCase , __lowerCamelCase : Any = split(lowerCamelCase__ , lowerCamelCase__ ) return merge(lowerCamelCase__ , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None: if not root: # None return else: inorder(root.left ) print(root.value , end=',' ) inorder(root.right ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: for arg in args.split(): if arg[0] == "+": __lowerCamelCase : str = insert(lowerCamelCase__ , int(arg[1:] ) ) elif arg[0] == "-": __lowerCamelCase : List[Any] = erase(lowerCamelCase__ , int(arg[1:] ) ) else: print('Unknown command' ) return root def SCREAMING_SNAKE_CASE__ ( ) -> None: __lowerCamelCase : int = None print( 'enter numbers to create a tree, + value to add value into treap, ' '- value to erase all nodes with value. \'q\' to quit. ' ) __lowerCamelCase : Optional[int] = input() while args != "q": __lowerCamelCase : Optional[int] = interact_treap(lowerCamelCase__ , lowerCamelCase__ ) print(lowerCamelCase__ ) __lowerCamelCase : int = input() print('good by!' ) if __name__ == "__main__": import doctest doctest.testmod() main()
73
"""simple docstring""" def a__ ( snake_case__ ) -> bool: lowerCamelCase = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def a__ ( snake_case__ = 50_00 ) -> int: lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case__ )] for i, pentagonal_i in enumerate(snake_case__ ): for j in range(snake_case__ , len(snake_case__ ) ): lowerCamelCase = pentagonal_nums[j] lowerCamelCase = pentagonal_i + pentagonal_j lowerCamelCase = pentagonal_j - pentagonal_i if is_pentagonal(snake_case__ ) and is_pentagonal(snake_case__ ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
291
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
74
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowerCAmelCase : Tuple = logging.get_logger(__name__) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: with open(snake_case__ , """rb""" ) as flax_state_f: lowerCamelCase = from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCamelCase = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) lowerCamelCase = """""" lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" ) lowerCamelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCamelCase = [] lowerCamelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): lowerCamelCase = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCamelCase = """.""".join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict lowerCamelCase = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor lowerCamelCase = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list lowerCamelCase = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' """ use it for predictions and inference.""" ) return pt_model
291
0
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 a_ : str = sys.version_info >= (3, 10) def a_ ( __snake_case : Union[str, Any]=None , __snake_case : Union[str, Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__snake_case ) @dataclass class __UpperCamelCase : lowercase : int lowercase : float lowercase : str lowercase : bool @dataclass class __UpperCamelCase : lowercase : int =42 lowercase : str =field(default='toto' , metadata={'help': 'help message'} ) @dataclass class __UpperCamelCase : lowercase : bool =False lowercase : bool =True lowercase : Optional[bool] =None class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Union[str, Any] ='titi' lowercase : Union[str, Any] ='toto' class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] ='titi' lowercase : Dict ='toto' lowercase : int =42 @dataclass class __UpperCamelCase : lowercase : BasicEnum ="toto" def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =BasicEnum(self.foo ) @dataclass class __UpperCamelCase : lowercase : MixedTypeEnum ="toto" def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =MixedTypeEnum(self.foo ) @dataclass class __UpperCamelCase : lowercase : Optional[int] =None lowercase : Optional[float] =field(default=lowerCamelCase__ , metadata={'help': 'help message'} ) lowercase : Optional[str] =None lowercase : Optional[List[str]] =list_field(default=[] ) lowercase : Optional[List[int]] =list_field(default=[] ) @dataclass class __UpperCamelCase : lowercase : List[int] =list_field(default=[] ) lowercase : List[int] =list_field(default=[1, 2, 3] ) lowercase : List[str] =list_field(default=['Hallo', 'Bonjour', 'Hello'] ) lowercase : List[float] =list_field(default=[0.1, 0.2, 0.3] ) @dataclass class __UpperCamelCase : lowercase : List[int] =field() lowercase : str =field() lowercase : BasicEnum =field() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =BasicEnum(self.required_enum ) @dataclass class __UpperCamelCase : lowercase : int lowercase : "BasicEnum" =field() lowercase : "Optional[bool]" =None lowercase : "str" =field(default='toto' , metadata={'help': 'help message'} ) lowercase : "List[str]" =list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class __UpperCamelCase : lowercase : bool =False lowercase : bool =True lowercase : bool | None =None @dataclass class __UpperCamelCase : lowercase : int | None =None lowercase : float | None =field(default=lowerCamelCase__ , metadata={'help': 'help message'} ) lowercase : str | None =None lowercase : list[str] | None =list_field(default=[] ) lowercase : list[int] | None =list_field(default=[] ) class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" self.assertEqual(len(a._actions ), len(b._actions ) ) for x, y in zip(a._actions, b._actions ): lowerCamelCase_ ={k: v for k, v in vars(lowerCAmelCase ).items() if k != '''container'''} lowerCamelCase_ ={k: v for k, v in vars(lowerCAmelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''', lowerCAmelCase ) and yy.get('''choices''', lowerCAmelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](lowerCAmelCase ), yy['''type'''](lowerCAmelCase ) ) del xx["type"], yy["type"] self.assertEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument('''--foo''', type=lowerCAmelCase, required=lowerCAmelCase ) expected.add_argument('''--bar''', type=lowerCAmelCase, required=lowerCAmelCase ) expected.add_argument('''--baz''', type=lowerCAmelCase, required=lowerCAmelCase ) expected.add_argument('''--flag''', type=lowerCAmelCase, default=lowerCAmelCase, const=lowerCAmelCase, nargs='''?''' ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((lowerCamelCase_), ) =parser.parse_args_into_dataclasses(lowerCAmelCase, look_for_args_file=lowerCAmelCase ) self.assertFalse(example.flag ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument('''--foo''', default=42, type=lowerCAmelCase ) expected.add_argument('''--baz''', default='''toto''', type=lowerCAmelCase, help='''help message''' ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument('''--foo''', type=lowerCAmelCase, default=lowerCAmelCase, const=lowerCAmelCase, nargs='''?''' ) expected.add_argument('''--baz''', type=lowerCAmelCase, default=lowerCAmelCase, const=lowerCAmelCase, nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''', action='''store_false''', default=lowerCAmelCase, dest='''baz''' ) expected.add_argument('''--opt''', type=lowerCAmelCase, default=lowerCAmelCase ) lowerCamelCase_ =[WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowerCAmelCase ) for dataclass_type in dataclass_types: lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =parser.parse_args([] ) self.assertEqual(lowerCAmelCase, Namespace(foo=lowerCAmelCase, baz=lowerCAmelCase, opt=lowerCAmelCase ) ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(lowerCAmelCase, Namespace(foo=lowerCAmelCase, baz=lowerCAmelCase, opt=lowerCAmelCase ) ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(lowerCAmelCase, Namespace(foo=lowerCAmelCase, baz=lowerCAmelCase, opt=lowerCAmelCase ) ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(lowerCAmelCase, Namespace(foo=lowerCAmelCase, baz=lowerCAmelCase, opt=lowerCAmelCase ) ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(lowerCAmelCase, Namespace(foo=lowerCAmelCase, baz=lowerCAmelCase, opt=lowerCAmelCase ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument( '''--foo''', default='''toto''', choices=['''titi''', '''toto''', 42], type=make_choice_type_function(['''titi''', '''toto''', 42] ), ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =parser.parse_args([] ) self.assertEqual(args.foo, '''toto''' ) lowerCamelCase_ =parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.toto ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo, '''titi''' ) lowerCamelCase_ =parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.titi ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo, 42 ) lowerCamelCase_ =parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo ) def lowercase__ ( self ): """simple docstring""" @dataclass class __UpperCamelCase : lowercase : Literal["titi", "toto", 42] ="toto" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument( '''--foo''', default='''toto''', choices=('''titi''', '''toto''', 42), type=make_choice_type_function(['''titi''', '''toto''', 42] ), ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =parser.parse_args([] ) self.assertEqual(args.foo, '''toto''' ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo, '''titi''' ) lowerCamelCase_ =parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo, 42 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument('''--foo_int''', nargs='''+''', default=[], type=lowerCAmelCase ) expected.add_argument('''--bar_int''', nargs='''+''', default=[1, 2, 3], type=lowerCAmelCase ) expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=lowerCAmelCase ) expected.add_argument('''--foo_float''', nargs='''+''', default=[0.1, 0.2, 0.3], type=lowerCAmelCase ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =parser.parse_args([] ) self.assertEqual( lowerCAmelCase, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['''Hallo''', '''Bonjour''', '''Hello'''], foo_float=[0.1, 0.2, 0.3] ), ) lowerCamelCase_ =parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(lowerCAmelCase, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['''a''', '''b''', '''c'''], foo_float=[0.1, 0.7] ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument('''--foo''', default=lowerCAmelCase, type=lowerCAmelCase ) expected.add_argument('''--bar''', default=lowerCAmelCase, type=lowerCAmelCase, help='''help message''' ) expected.add_argument('''--baz''', default=lowerCAmelCase, type=lowerCAmelCase ) expected.add_argument('''--ces''', nargs='''+''', default=[], type=lowerCAmelCase ) expected.add_argument('''--des''', nargs='''+''', default=[], type=lowerCAmelCase ) lowerCamelCase_ =[OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowerCAmelCase ) for dataclass_type in dataclass_types: lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =parser.parse_args([] ) self.assertEqual(lowerCAmelCase, Namespace(foo=lowerCAmelCase, bar=lowerCAmelCase, baz=lowerCAmelCase, ces=[], des=[] ) ) lowerCamelCase_ =parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(lowerCAmelCase, Namespace(foo=12, bar=3.1_4, baz='''42''', ces=['''a''', '''b''', '''c'''], des=[1, 2, 3] ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument('''--required_list''', nargs='''+''', type=lowerCAmelCase, required=lowerCAmelCase ) expected.add_argument('''--required_str''', type=lowerCAmelCase, required=lowerCAmelCase ) expected.add_argument( '''--required_enum''', type=make_choice_type_function(['''titi''', '''toto'''] ), choices=['''titi''', '''toto'''], required=lowerCAmelCase, ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ =argparse.ArgumentParser() expected.add_argument('''--foo''', type=lowerCAmelCase, required=lowerCAmelCase ) expected.add_argument( '''--required_enum''', type=make_choice_type_function(['''titi''', '''toto'''] ), choices=['''titi''', '''toto'''], required=lowerCAmelCase, ) expected.add_argument('''--opt''', type=lowerCAmelCase, default=lowerCAmelCase ) expected.add_argument('''--baz''', default='''toto''', type=lowerCAmelCase, help='''help message''' ) expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=lowerCAmelCase ) self.argparsersEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ ={ '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } lowerCamelCase_ =parser.parse_dict(lowerCAmelCase )[0] lowerCamelCase_ =BasicExample(**lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ ={ '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(lowerCAmelCase, parser.parse_dict, lowerCAmelCase, allow_extra_keys=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ ={ '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ =os.path.join(lowerCAmelCase, '''temp_json''' ) os.mkdir(lowerCAmelCase ) with open(temp_local_path + '''.json''', '''w+''' ) as f: json.dump(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] lowerCamelCase_ =BasicExample(**lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) lowerCamelCase_ ={ '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ =os.path.join(lowerCAmelCase, '''temp_yaml''' ) os.mkdir(lowerCAmelCase ) with open(temp_local_path + '''.yaml''', '''w+''' ) as f: yaml.dump(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] lowerCamelCase_ =BasicExample(**lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =HfArgumentParser(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase )
75
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCAmelCase : int = None lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", }, """tokenizer_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""", }, } lowerCAmelCase : Optional[int] = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } lowerCAmelCase : Union[str, Any] = """▁""" # Segments (not really needed) lowerCAmelCase : str = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : Tuple = 2 lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : List[Any] = 4 class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = "left" __UpperCamelCase = XLNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ): """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) lowerCamelCase = 3 lowerCamelCase = do_lower_case lowerCamelCase = remove_space lowerCamelCase = keep_accents lowerCamelCase = vocab_file lowerCamelCase = False if not self.vocab_file else True def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
291
0
from collections.abc import Callable import numpy as np def lowerCamelCase__ ( _a , _a , _a , _a , _a): SCREAMING_SNAKE_CASE : Dict = int(np.ceil((x_end - xa) / step_size)) SCREAMING_SNAKE_CASE : Tuple = np.zeros((n + 1,)) SCREAMING_SNAKE_CASE : int = ya SCREAMING_SNAKE_CASE : int = xa for k in range(_a): SCREAMING_SNAKE_CASE : Any = y[k] + step_size * ode_func(_a , y[k]) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
76
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __magic_name__ ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" lowerCamelCase = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) lowerCamelCase = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) lowerCamelCase = 3 lowerCamelCase = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) lowerCamelCase = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) lowerCamelCase = generator.model.config.eos_token_id lowerCamelCase = """<pad>""" lowerCamelCase = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
291
0
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class UpperCAmelCase_ ( _a , _a): @register_to_config def __init__( self , a = 7_6_8 , ) -> List[Any]: super().__init__() lowercase__ : Dict = nn.Parameter(torch.zeros(1 , a ) ) lowercase__ : Any = nn.Parameter(torch.ones(1 , a ) ) def _UpperCAmelCase ( self , a = None , a = None , ) -> int: lowercase__ : List[Any] = nn.Parameter(self.mean.to(a ).to(a ) ) lowercase__ : str = nn.Parameter(self.std.to(a ).to(a ) ) return self def _UpperCAmelCase ( self , a ) -> Any: lowercase__ : str = (embeds - self.mean) * 1.0 / self.std return embeds def _UpperCAmelCase ( self , a ) -> List[str]: lowercase__ : Union[str, Any] = (embeds * self.std) + self.mean return embeds
77
"""simple docstring""" def a__ ( snake_case__ , snake_case__ = False ) -> str: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected string as input, found {type(snake_case__ )}' raise ValueError(snake_case__ ) if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected boolean as use_pascal parameter, found {type(snake_case__ )}' raise ValueError(snake_case__ ) lowerCamelCase = input_str.split("""_""" ) lowerCamelCase = 0 if use_pascal else 1 lowerCamelCase = words[start_index:] lowerCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize] lowerCamelCase = """""" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() snake_case_ = logging.get_logger() @dataclass class A_ : """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = field(default_factory=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = field(default_factory=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase__ ( self :str , lowercase_ :Any , lowercase_ :Tensor , lowercase_ :Tensor ) -> Union[str, Any]: UpperCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(lowercase_ , nn.Convad ) or isinstance(lowercase_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowercase_ ) def __call__( self :Dict , lowercase_ :Tensor ) -> List[Any]: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowercase_ ) [x.remove() for x in self.handles] return self @property def UpperCAmelCase__ ( self :Optional[int] ) -> Any: # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda lowercase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class A_ : """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 1 __UpperCamelCase = field(default_factory=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = field(default_factory=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = True def __call__( self :Tuple , lowercase_ :Tensor ) -> Dict: UpperCAmelCase = Tracker(self.dest )(lowercase_ ).parametrized UpperCAmelCase = Tracker(self.src )(lowercase_ ).parametrized UpperCAmelCase = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.src_skip , lowercase_ ) ) UpperCAmelCase = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.dest_skip , lowercase_ ) ) if len(lowercase_ ) != len(lowercase_ ) and self.raise_if_mismatch: raise Exception( f"""Numbers of operations are different. Source module has {len(lowercase_ )} operations while""" f""" destination module has {len(lowercase_ )}.""" ) for dest_m, src_m in zip(lowercase_ , lowercase_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) class A_ ( nn.Module ): """simple docstring""" def __init__( self :List[Any] , lowercase_ :nn.Module ) -> Union[str, Any]: super().__init__() UpperCAmelCase = [] # - get the stem feature_blocks.append(('conv1', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('block' ), f"""Unexpected layer name {k}""" UpperCAmelCase = len(lowercase_ ) + 1 feature_blocks.append((f"""res{block_index}""", v) ) UpperCAmelCase = nn.ModuleDict(lowercase_ ) def UpperCAmelCase__ ( self :str , lowercase_ :Tensor ) -> Tuple: return get_trunk_forward_outputs( lowercase_ , out_feat_keys=lowercase_ , feature_blocks=self._feature_blocks , ) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str ) -> str: UpperCAmelCase = x.split('-' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self :Optional[int] , lowercase_ :str ) -> Callable[[], Tuple[nn.Module, Dict]]: # default to timm! if x not in self: UpperCAmelCase = self.convert_name_to_timm(lowercase_ ) UpperCAmelCase = partial(lambda: (timm.create_model(lowercase_ , pretrained=lowercase_ ).eval(), None) ) else: UpperCAmelCase = super().__getitem__(lowercase_ ) return val class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __getitem__( self :Tuple , lowercase_ :str ) -> Callable[[], nn.Module]: if "seer" in x and "in1k" not in x: UpperCAmelCase = RegNetModel else: UpperCAmelCase = RegNetForImageClassification return val def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): for from_key, to_key in keys: UpperCAmelCase = from_state_dict[from_key].clone() print(F"""Copied key={from_key} to={to_key}""" ) return to_state_dict def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ): print(F"""Converting {name}...""" ) with torch.no_grad(): UpperCAmelCase , UpperCAmelCase = from_model_func() UpperCAmelCase = our_model_func(lowercase_ ).eval() UpperCAmelCase = ModuleTransfer(src=lowercase_ , dest=lowercase_ , raise_if_mismatch=lowercase_ ) UpperCAmelCase = torch.randn((1, 3, 224, 224) ) module_transfer(lowercase_ ) if from_state_dict is not None: UpperCAmelCase = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: UpperCAmelCase = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] UpperCAmelCase = manually_copy_vissl_head(lowercase_ , our_model.state_dict() , lowercase_ ) our_model.load_state_dict(lowercase_ ) UpperCAmelCase = our_model(lowercase_ , output_hidden_states=lowercase_ ) UpperCAmelCase = ( our_outputs.logits if isinstance(lowercase_ , lowercase_ ) else our_outputs.last_hidden_state ) UpperCAmelCase = from_model(lowercase_ ) UpperCAmelCase = from_output[-1] if type(lowercase_ ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: UpperCAmelCase = our_outputs.hidden_states[-1] assert torch.allclose(lowercase_ , lowercase_ ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase_ , ) UpperCAmelCase = 224 if 'seer' not in name else 384 # we can use the convnext one UpperCAmelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase_ ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase_ , ) print(F"""Pushed {name}""" ) def _lowerCAmelCase ( lowercase_ , lowercase_ = None , lowercase_ = True ): UpperCAmelCase = 'imagenet-1k-id2label.json' UpperCAmelCase = 1000 UpperCAmelCase = (1, num_labels) UpperCAmelCase = 'huggingface/label-files' UpperCAmelCase = num_labels UpperCAmelCase = json.load(open(cached_download(hf_hub_url(lowercase_ , lowercase_ , repo_type='dataset' ) ) , 'r' ) ) UpperCAmelCase = {int(lowercase_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} UpperCAmelCase = partial(lowercase_ , num_labels=lowercase_ , idalabel=lowercase_ , labelaid=lowercase_ ) UpperCAmelCase = { 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), 'regnet-y-016': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), # finetuned on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), 'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), } UpperCAmelCase = NameToOurModelFuncMap() UpperCAmelCase = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(lowercase_ , lowercase_ ) -> Tuple[nn.Module, Dict]: UpperCAmelCase = torch.hub.load_state_dict_from_url(lowercase_ , model_dir=str(lowercase_ ) , map_location='cpu' ) UpperCAmelCase = model_func() # check if we have a head, if yes add it UpperCAmelCase = files['classy_state_dict']['base_model']['model'] UpperCAmelCase = model_state_dict['trunk'] model.load_state_dict(lowercase_ ) return model.eval(), model_state_dict["heads"] # pretrained UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , ) # IN1K finetuned UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) UpperCAmelCase = partial( lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , ) if model_name: convert_weight_and_push( lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase_ , lowercase_ , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase_ , lowercase_ , lowercase_ , ) return config, expected_shape if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported regnet* architecture,""" """ currently: regnetx-*, regnety-*. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) snake_case_ = parser.parse_args() snake_case_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
78
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase : int = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ): """simple docstring""" super().__init__(**_a ) lowerCamelCase = size if size is not None else {"""shortest_edge""": 256} lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = resample lowerCamelCase = do_center_crop lowerCamelCase = crop_size lowerCamelCase = do_rescale lowerCamelCase = rescale_factor lowerCamelCase = do_normalize lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) lowerCamelCase = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a ): """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a , _a = None , **_a , ): """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ): """simple docstring""" lowerCamelCase = do_resize if do_resize is not None else self.do_resize lowerCamelCase = size if size is not None else self.size lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = resample if resample is not None else self.resample lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase = crop_size if crop_size is not None else self.crop_size lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase = image_mean if image_mean is not None else self.image_mean lowerCamelCase = image_std if image_std is not None else self.image_std lowerCamelCase = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase = [to_numpy_array(_a ) for image in images] if do_resize: lowerCamelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: lowerCamelCase = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: lowerCamelCase = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCamelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCamelCase = [to_channel_dimension_format(_a , _a ) for image in images] lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a ) def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_a ): lowerCamelCase = target_sizes.numpy() lowerCamelCase = [] for idx in range(len(_a ) ): lowerCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a ) lowerCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: lowerCamelCase = logits.argmax(dim=1 ) lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
291
0
'''simple docstring''' from __future__ import annotations from typing import Any def __lowercase ( __lowercase ) -> int: '''simple docstring''' if not postfix_notation: return 0 _A = {"+", "-", "*", "/"} _A = [] for token in postfix_notation: if token in operations: _A , _A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__lowercase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
79
"""simple docstring""" import operator as op lowerCAmelCase : Dict = """scaler.pt""" lowerCAmelCase : Tuple = """pytorch_model""" lowerCAmelCase : Union[str, Any] = """random_states""" lowerCAmelCase : Union[str, Any] = """optimizer""" lowerCAmelCase : Dict = """scheduler""" lowerCAmelCase : int = """pytorch_model.bin""" lowerCAmelCase : str = """pytorch_model.bin.index.json""" lowerCAmelCase : Union[str, Any] = """model.safetensors""" lowerCAmelCase : List[Any] = """model.safetensors.index.json""" lowerCAmelCase : List[Any] = """1.10.2""" lowerCAmelCase : Any = """py38""" lowerCAmelCase : Optional[int] = """4.17.0""" lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""] lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""] lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""] lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""] lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""] lowerCAmelCase : Any = """2.0.1""" lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""] lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""] lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowerCAmelCase : Union[str, Any] = [ """nnodes""", """nproc_per_node""", """rdzv_backend""", """rdzv_endpoint""", """rdzv_id""", """rdzv_conf""", """standalone""", """max_restarts""", """monitor_interval""", """start_method""", """role""", """module""", """m""", """no_python""", """run_path""", """log_dir""", """r""", """redirects""", """t""", """tee""", """node_rank""", """master_addr""", """master_port""", ] lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""] lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
291
0
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType a__ : Optional[List[str]] = None a__ : Dict = '<' if sys.byteorder == 'little' else '>' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image a__ : Any = [ np.dtype('|b1'), np.dtype('|u1'), np.dtype('<u2'), np.dtype('>u2'), np.dtype('<i2'), np.dtype('>i2'), np.dtype('<u4'), np.dtype('>u4'), np.dtype('<i4'), np.dtype('>i4'), np.dtype('<f4'), np.dtype('>f4'), np.dtype('<f8'), np.dtype('>f8'), ] @dataclass class lowercase_ : __UpperCAmelCase = True __UpperCAmelCase = None # Automatically constructed __UpperCAmelCase = "PIL.Image.Image" __UpperCAmelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) __UpperCAmelCase = field(default='Image' , init=a__ , repr=a__ ) def __call__( self ): return self.pa_type def __a ( self , a ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if isinstance(a , a ): UpperCamelCase__ = np.array(a ) if isinstance(a , a ): return {"path": value, "bytes": None} elif isinstance(a , a ): return {"path": None, "bytes": value} elif isinstance(a , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(a ) elif isinstance(a , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(a ) elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def __a ( self , a , a=None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support decoding images, please install 'Pillow'." ) if token_per_repo_id is None: UpperCamelCase__ = {} UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(a ): UpperCamelCase__ = PIL.Image.open(a ) else: UpperCamelCase__ = path.split("::" )[-1] try: UpperCamelCase__ = string_to_dict(a , config.HUB_DATASETS_URL )["repo_id"] UpperCamelCase__ = token_per_repo_id.get(a ) except ValueError: UpperCamelCase__ = None with xopen(a , "rb" , use_auth_token=a ) as f: UpperCamelCase__ = BytesIO(f.read() ) UpperCamelCase__ = PIL.Image.open(bytes_ ) else: UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def __a ( self ): from .features import Value return ( self if self.decode else { "bytes": Value("binary" ), "path": Value("string" ), } ) def __a ( self , a ): if pa.types.is_string(storage.type ): UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() ) UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() ) UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: UpperCamelCase__ = storage.field("bytes" ) else: UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: UpperCamelCase__ = storage.field("path" ) else: UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() ) UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): UpperCamelCase__ = pa.array( [encode_np_array(np.array(a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() ) UpperCamelCase__ = pa.StructArray.from_arrays( [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(a , self.pa_type ) def __a ( self , a ): @no_op_if_value_is_null def path_to_bytes(a ): with xopen(a , "rb" ) as f: UpperCamelCase__ = f.read() return bytes_ UpperCamelCase__ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCamelCase__ = pa.array( [os.path.basename(a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(a , self.pa_type ) def _UpperCamelCase ( ) -> List[str]: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def _UpperCamelCase ( __A ) -> bytes: '''simple docstring''' UpperCamelCase__ = BytesIO() if image.format in list_image_compression_formats(): UpperCamelCase__ = image.format else: UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(__A , format=__A ) return buffer.getvalue() def _UpperCamelCase ( __A ) -> dict: '''simple docstring''' if hasattr(__A , "filename" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(__A )} def _UpperCamelCase ( __A ) -> dict: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) UpperCamelCase__ = array.dtype UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER UpperCamelCase__ = dtype.kind UpperCamelCase__ = dtype.itemsize UpperCamelCase__ = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: UpperCamelCase__ = np.dtype("|u1" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: UpperCamelCase__ = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: UpperCamelCase__ = dtype_byteorder + dtype_kind + str(__A ) UpperCamelCase__ = np.dtype(__A ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) UpperCamelCase__ = PIL.Image.fromarray(array.astype(__A ) ) return {"path": None, "bytes": image_to_bytes(__A )} def _UpperCamelCase ( __A ) -> List[dict]: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if objs: UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(__A ) if isinstance(__A , __A ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(__A , np.ndarray ): UpperCamelCase__ = no_op_if_value_is_null(__A ) return [obj_to_image_dict_func(__A ) for obj in objs] elif isinstance(__A , PIL.Image.Image ): UpperCamelCase__ = no_op_if_value_is_null(__A ) return [obj_to_image_dict_func(__A ) for obj in objs] else: return objs else: return objs
80
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = image_size lowerCamelCase = patch_size lowerCamelCase = num_channels lowerCamelCase = is_training lowerCamelCase = use_labels lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase = (image_size // patch_size) ** 2 lowerCamelCase = num_patches + 1 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase = None if self.use_labels: lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ): """simple docstring""" return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = ViTMSNModel(config=_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = self.type_sequence_label_size lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a , labels=_a ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase = 1 lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ViTMSNModelTester(self ) lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def _lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def _lowerCAmelCase ( self ): """simple docstring""" pass def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a , nn.Linear ) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase = [*signature.parameters.keys()] lowerCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase = ViTMSNModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def a__ ( ) -> Any: lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def _lowerCAmelCase ( self ): """simple docstring""" torch.manual_seed(2 ) lowerCamelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a ) lowerCamelCase = self.default_image_processor lowerCamelCase = prepare_img() lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" ).to(_a ) # forward pass with torch.no_grad(): lowerCamelCase = model(**_a ) # verify the logits lowerCamelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCamelCase = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
291
0
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__) def _A ( lowercase , lowercase ): """simple docstring""" def run_func(lowercase ): @wraps(lowercase ) def run_in_eager_mode(*lowercase , **lowercase ): return func(*lowercase , **lowercase ) @wraps(lowercase ) @tf.function(experimental_compile=lowercase ) def run_in_graph_mode(*lowercase , **lowercase ): return func(*lowercase , **lowercase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _A ( lowercase , lowercase , lowercase ): """simple docstring""" a =random.Random() a =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowercase , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = 42 __lowerCAmelCase = 42 __lowerCAmelCase = "TensorFlow" @property def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: return tf.__version__ def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> float: # initialize GPU on separate process a =self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) a =self._prepare_inference_func(__A , __A , __A ) return self._measure_speed(_inference ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> float: a =self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) a =self._prepare_train_func(__A , __A , __A ) return self._measure_speed(_train ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> [Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A ) a =self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) a =self._prepare_inference_func(__A , __A , __A ) return self._measure_memory(_inference ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A ) a =self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) a =self._prepare_train_func(__A , __A , __A ) return self._measure_memory(_train ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Callable[[], None]: a =self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) a =( hasattr(__A , '''architectures''' ) and isinstance(config.architectures , __A ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: a ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model a =__import__('''transformers''' , fromlist=[model_class] ) a =getattr(__A , __A ) a =model_cls(__A ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: a =TF_MODEL_MAPPING[config.__class__](__A ) # encoder-decoder has vocab size saved differently a =config.vocab_size if hasattr(__A , '''vocab_size''' ) else config.encoder.vocab_size a =random_input_ids(__A , __A , __A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__A , decoder_input_ids=__A , training=__A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__A , training=__A ) a =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Callable[[], None]: a =self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) a =( hasattr(__A , '''architectures''' ) and isinstance(config.architectures , __A ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: a ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model a =__import__('''transformers''' , fromlist=[model_class] ) a =getattr(__A , __A ) a =model_cls(__A ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: a =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__A ) # encoder-decoder has vocab size saved differently a =config.vocab_size if hasattr(__A , '''vocab_size''' ) else config.encoder.vocab_size a =random_input_ids(__A , __A , __A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): a =model(__A , decoder_input_ids=__A , labels=__A , training=__A )[0] a =tf.gradients(__A , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): a =model(__A , labels=__A , training=__A )[0] a =tf.gradients(__A , model.trainable_variables ) return gradients a =encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def SCREAMING_SNAKE_CASE ( self , __A ) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(__A , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average a =timeit.repeat( __A , repeat=self.args.repeat , number=10 , ) return min(__A ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def SCREAMING_SNAKE_CASE ( self , __A ) -> [Memory, MemorySummary]: logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) a =start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) a ='''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() a =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) a =nvml.nvmlDeviceGetMemoryInfo(__A ) a =meminfo.used a =Memory(__A ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) a =None else: a =measure_peak_memory_cpu(__A ) a =Memory(__A ) if isinstance(__A , __A ) else memory_bytes if self.args.trace_memory_line_by_line: a =stop_memory_tracing(__A ) if memory is None: a =summary.total else: a =None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
81
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]: lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]: if split_mlp_wi: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] lowerCamelCase = (wi_a, wi_a) else: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple: return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def a__ ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ) -> Dict: lowerCamelCase = traverse_util.flatten_dict(variables["""target"""] ) lowerCamelCase = {"""/""".join(snake_case__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCamelCase = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , snake_case__ ) lowerCamelCase = collections.OrderedDict() # Shared embeddings. lowerCamelCase = old["""token_embedder/embedding"""] # Encoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , snake_case__ , """encoder""" ).T lowerCamelCase = old["""encoder/encoder_norm/scale"""] if not scalable_attention: lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """encoder""" ).T lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (Cross Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 2 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T lowerCamelCase = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCamelCase = old["""decoder/logits_dense/kernel"""].T return new def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCamelCase = state_dict["""shared.weight"""] return state_dict def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = checkpoints.load_tax_checkpoint(snake_case__ ) lowerCamelCase = convert_tax_to_pytorch( snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ ) lowerCamelCase = make_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ , strict=snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ) -> str: lowerCamelCase = MTaConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowerCamelCase = UMTaEncoderModel(snake_case__ ) else: lowerCamelCase = UMTaForConditionalGeneration(snake_case__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(snake_case__ ) # Verify that we can load the checkpoint. model.from_pretrained(snake_case__ ) print("""Done""" ) if __name__ == "__main__": lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) lowerCAmelCase : int = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
291
0
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase = BertTokenizer __lowerCamelCase = BertTokenizerFast __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = filter_non_english def snake_case ( self ): """simple docstring""" super().setUp() _lowerCAmelCase = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def snake_case ( self , _snake_case ): """simple docstring""" _lowerCAmelCase = """UNwant\u00E9d,running""" _lowerCAmelCase = """unwanted, running""" return input_text, output_text def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.tokenizer_class(self.vocab_file ) _lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(_snake_case , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [9, 6, 7, 12, 10, 11] ) def snake_case ( self ): """simple docstring""" if not self.test_rust_tokenizer: return _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_rust_tokenizer() _lowerCAmelCase = """UNwant\u00E9d,running""" _lowerCAmelCase = tokenizer.tokenize(_snake_case ) _lowerCAmelCase = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) _lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) _lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) self.assertListEqual(_snake_case , _snake_case ) _lowerCAmelCase = self.get_rust_tokenizer() _lowerCAmelCase = tokenizer.encode(_snake_case ) _lowerCAmelCase = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) # With lower casing _lowerCAmelCase = self.get_tokenizer(do_lower_case=_snake_case ) _lowerCAmelCase = self.get_rust_tokenizer(do_lower_case=_snake_case ) _lowerCAmelCase = """UNwant\u00E9d,running""" _lowerCAmelCase = tokenizer.tokenize(_snake_case ) _lowerCAmelCase = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) _lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) _lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) self.assertListEqual(_snake_case , _snake_case ) _lowerCAmelCase = self.get_rust_tokenizer() _lowerCAmelCase = tokenizer.encode(_snake_case ) _lowerCAmelCase = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = BasicTokenizer() _lowerCAmelCase = """a\n'll !!to?'d of, can't.""" _lowerCAmelCase = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""] self.assertListEqual(tokenizer.tokenize(_snake_case ) , _snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] _lowerCAmelCase = {} for i, token in enumerate(_snake_case ): _lowerCAmelCase = i _lowerCAmelCase = WordpieceTokenizer(vocab=_snake_case , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def snake_case ( self ): """simple docstring""" self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def snake_case ( self ): """simple docstring""" self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def snake_case ( self ): """simple docstring""" self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_snake_case ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) self.assertListEqual( [rust_tokenizer.tokenize(_snake_case ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) @slow def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.tokenizer_class.from_pretrained("""bert-base-uncased""" ) _lowerCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=_snake_case ) _lowerCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_snake_case ) _lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case ) _lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def snake_case ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) _lowerCAmelCase = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' _lowerCAmelCase = tokenizer_r.encode_plus( _snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case , ) _lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(_snake_case , """do_lower_case""" ) else False _lowerCAmelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = ["""的""", """人""", """有"""] _lowerCAmelCase = """""".join(_snake_case ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase = True _lowerCAmelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) _lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) _lowerCAmelCase = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case ) _lowerCAmelCase = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case ) _lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_snake_case ) _lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_snake_case ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_snake_case , _snake_case ) self.assertListEqual(_snake_case , _snake_case ) _lowerCAmelCase = False _lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) _lowerCAmelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) _lowerCAmelCase = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case ) _lowerCAmelCase = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case ) _lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_snake_case ) _lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_snake_case ) # it is expected that only the first Chinese character is not preceded by "##". _lowerCAmelCase = [ F'##{token}' if idx != 0 else token for idx, token in enumerate(_snake_case ) ] self.assertListEqual(_snake_case , _snake_case ) self.assertListEqual(_snake_case , _snake_case )
82
"""simple docstring""" from __future__ import annotations def a__ ( snake_case__ , snake_case__ ) -> bool: if len(snake_case__ ) == 0: return False lowerCamelCase = len(snake_case__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , snake_case__ ) else: return binary_search(a_list[midpoint + 1 :] , snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip() lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")] lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip()) lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """ print(F"""{target} was {not_str}found in {sequence}""")
291
0
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline snake_case_ : int = logging.get_logger(__name__) class lowercase__ ( lowercase ): def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : str ): '''simple docstring''' if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Union[str, Any] = [label.strip() for label in labels.split(',' ) if label.strip()] return labels def __call__( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ): '''simple docstring''' if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0: raise ValueError('You must include at least one label and at least one sequence.' ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. ' 'Make sure the passed template includes formatting syntax such as {{}} where the label should go.' ).format(lowerCamelCase__ ) ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Dict = [sequences] _UpperCamelCase : str = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(lowercase ) class lowercase__ ( lowercase ): def __init__( self : str ,lowerCamelCase__ : str=ZeroShotClassificationArgumentHandler() ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Tuple ): '''simple docstring''' _UpperCamelCase : Optional[Any] = args_parser super().__init__(*lowerCamelCase__ ,**lowerCamelCase__ ) if self.entailment_id == -1: logger.warning( 'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to ' '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' ) @property def UpperCamelCase_ ( self : str ): '''simple docstring''' for label, ind in self.model.config.labelaid.items(): if label.lower().startswith('entail' ): return ind return -1 def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Optional[int]=TruncationStrategy.ONLY_FIRST ,**lowerCamelCase__ : Union[str, Any] ): '''simple docstring''' _UpperCamelCase : Tuple = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( 'Tokenizer was not supporting padding necessary for zero-shot, attempting to use ' ' `pad_token=eos_token`' ) _UpperCamelCase : List[str] = self.tokenizer.eos_token try: _UpperCamelCase : Optional[Any] = self.tokenizer( lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,) except Exception as e: if "too short" in str(lowerCamelCase__ ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. _UpperCamelCase : int = self.tokenizer( lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,) else: raise e return inputs def UpperCamelCase_ ( self : Any ,**lowerCamelCase__ : str ): '''simple docstring''' if kwargs.get('multi_class' ,lowerCamelCase__ ) is not None: _UpperCamelCase : Union[str, Any] = kwargs['multi_class'] logger.warning( 'The `multi_class` argument has been deprecated and renamed to `multi_label`. ' '`multi_class` will be removed in a future version of Transformers.' ) _UpperCamelCase : Tuple = {} if "candidate_labels" in kwargs: _UpperCamelCase : Optional[int] = self._args_parser._parse_labels(kwargs['candidate_labels'] ) if "hypothesis_template" in kwargs: _UpperCamelCase : List[Any] = kwargs['hypothesis_template'] _UpperCamelCase : int = {} if "multi_label" in kwargs: _UpperCamelCase : Dict = kwargs['multi_label'] return preprocess_params, {}, postprocess_params def __call__( self : Optional[int] ,lowerCamelCase__ : Union[str, List[str]] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : List[str] ,): '''simple docstring''' if len(lowerCamelCase__ ) == 0: pass elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs: _UpperCamelCase : List[Any] = args[0] else: raise ValueError(F'Unable to understand extra arguments {args}' ) return super().__call__(lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Optional[Any]="This example is {}." ): '''simple docstring''' _UpperCamelCase , _UpperCamelCase : Optional[int] = self._args_parser(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ ,lowerCamelCase__ ) ): _UpperCamelCase : List[str] = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowerCamelCase__ ) - 1, **model_input, } def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Union[str, Any] ): '''simple docstring''' _UpperCamelCase : Dict = inputs['candidate_label'] _UpperCamelCase : List[Any] = inputs['sequence'] _UpperCamelCase : Tuple = {k: inputs[k] for k in self.tokenizer.model_input_names} _UpperCamelCase : int = self.model(**lowerCamelCase__ ) _UpperCamelCase : Optional[int] = { 'candidate_label': candidate_label, 'sequence': sequence, 'is_last': inputs['is_last'], **outputs, } return model_outputs def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int]=False ): '''simple docstring''' _UpperCamelCase : str = [outputs['candidate_label'] for outputs in model_outputs] _UpperCamelCase : List[str] = [outputs['sequence'] for outputs in model_outputs] _UpperCamelCase : Optional[Any] = np.concatenate([output['logits'].numpy() for output in model_outputs] ) _UpperCamelCase : str = logits.shape[0] _UpperCamelCase : Optional[Any] = len(lowerCamelCase__ ) _UpperCamelCase : str = N // n _UpperCamelCase : Optional[Any] = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowerCamelCase__ ) == 1: # softmax over the entailment vs. contradiction dim for each label independently _UpperCamelCase : Optional[Any] = self.entailment_id _UpperCamelCase : str = -1 if entailment_id == 0 else 0 _UpperCamelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]] _UpperCamelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 ,keepdims=lowerCamelCase__ ) _UpperCamelCase : List[str] = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels _UpperCamelCase : Dict = reshaped_outputs[..., self.entailment_id] _UpperCamelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 ,keepdims=lowerCamelCase__ ) _UpperCamelCase : Tuple = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
83
"""simple docstring""" def a__ ( snake_case__ ) -> list: if len(snake_case__ ) < 2: return collection def circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) -> bool: lowerCamelCase = False if low == high: return swapped lowerCamelCase = low lowerCamelCase = high while left < right: if collection[left] > collection[right]: lowerCamelCase , lowerCamelCase = ( collection[right], collection[left], ) lowerCamelCase = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowerCamelCase , lowerCamelCase = ( collection[right + 1], collection[left], ) lowerCamelCase = True lowerCamelCase = low + int((high - low) / 2 ) lowerCamelCase = circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase = circle_sort_util(snake_case__ , mid + 1 , snake_case__ ) return swapped or left_swap or right_swap lowerCamelCase = True while is_not_sorted is True: lowerCamelCase = circle_sort_util(snake_case__ , 0 , len(snake_case__ ) - 1 ) return collection if __name__ == "__main__": lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(""",""")] print(circle_sort(unsorted))
291
0
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser __UpperCAmelCase = logging.getLogger(__name__) torch.set_grad_enabled(False) __UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' def _snake_case ( lowercase__ : str , lowercase__ : int=1_0_0 , lowercase__ : int=" " ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Tuple = text.split(lowercase__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowercase__ ) , lowercase__ )] def _snake_case ( lowercase__ : dict ) -> dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(lowercase__ ): titles.append(title if title is not None else """""" ) texts.append(lowercase__ ) return {"title": titles, "text": texts} def _snake_case ( lowercase__ : dict , lowercase__ : DPRContextEncoder , lowercase__ : DPRContextEncoderTokenizerFast ) -> dict: '''simple docstring''' lowerCAmelCase_ :Tuple = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=lowercase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] lowerCAmelCase_ :int = ctx_encoder(input_ids.to(device=lowercase__ ) , return_dict=lowercase__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _snake_case ( lowercase__ : "RagExampleArguments" , lowercase__ : "ProcessingArguments" , lowercase__ : "IndexHnswArguments" , ) -> Optional[Any]: '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowerCAmelCase_ :Tuple = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowerCAmelCase_ :str = dataset.map(lowercase__ , batched=lowercase__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowerCAmelCase_ :Optional[int] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowercase__ ) lowerCAmelCase_ :List[Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowerCAmelCase_ :str = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space lowerCAmelCase_ :str = dataset.map( partial(lowercase__ , ctx_encoder=lowercase__ , ctx_tokenizer=lowercase__ ) , batched=lowercase__ , batch_size=processing_args.batch_size , features=lowercase__ , ) # And finally save your dataset lowerCAmelCase_ :Dict = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(lowercase__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowerCAmelCase_ :Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=lowercase__ ) # And save the index lowerCAmelCase_ :Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(lowercase__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :str = field( default=str(Path(A__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) UpperCAmelCase_ :str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) UpperCAmelCase_ :str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) UpperCAmelCase_ :Optional[str] = field( default=str(Path(A__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :Optional[int] = field( default=A__ , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) UpperCAmelCase_ :int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) UpperCAmelCase_ :int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) __UpperCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: __UpperCAmelCase = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
84
"""simple docstring""" from collections.abc import Generator def a__ ( ) -> Generator[int, None, None]: lowerCamelCase , lowerCamelCase = 0, 1 while True: lowerCamelCase , lowerCamelCase = b, a + b yield b def a__ ( snake_case__ = 10_00 ) -> int: lowerCamelCase = 1 lowerCamelCase = fibonacci_generator() while len(str(next(snake_case__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
291
0
'''simple docstring''' import os from pathlib import Path def UpperCamelCase_( ): '''simple docstring''' from torch.utils.cpp_extension import load snake_case_ = Path(snake_case ).resolve().parent.parent.parent / "kernels" / "deformable_detr" snake_case_ = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ), os.path.join("cuda" , "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention" , snake_case , with_cuda=snake_case , extra_include_paths=[str(snake_case )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
85
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase : List[str] = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ): """simple docstring""" super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) lowerCamelCase = spectrogram_length lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = feature_size // self.patch_size[1] lowerCamelCase = n_fft lowerCamelCase = sampling_rate // hop_length_to_sampling_rate lowerCamelCase = sampling_rate lowerCamelCase = padding_value lowerCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) lowerCamelCase = log_spec[:, :-1] lowerCamelCase = log_spec - 20.0 lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) lowerCamelCase = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCamelCase = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase = np.array(_a ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase = padded_audio_features * self.padding_value for i in range(len(_a ) ): lowerCamelCase = audio_features[i] lowerCamelCase = feature # return as BatchFeature if return_attention_mask: lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase = {"""audio_values""": padded_audio_features} lowerCamelCase = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
291
0
"""simple docstring""" from __future__ import annotations import time import numpy as np lowerCamelCase__ = [8, 5, 9, 7] lowerCamelCase__ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] lowerCamelCase__ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : List[str] = claim_vector __lowerCAmelCase : str = allocated_resources_table __lowerCAmelCase : Optional[int] = maximum_claim_table def __lowerCamelCase ( self ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __lowerCamelCase ( self ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __lowerCamelCase ( self ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_SCREAMING_SNAKE_CASE ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __lowerCamelCase ( self ): return {self.__need().index(_SCREAMING_SNAKE_CASE ): i for i in self.__need()} def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = self.__need() __lowerCAmelCase : int = self.__allocated_resources_table __lowerCAmelCase : List[str] = self.__available_resources() __lowerCAmelCase : Dict = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: __lowerCAmelCase : Optional[Any] = False for each_need in need_list: __lowerCAmelCase : Optional[int] = True for index, need in enumerate(_SCREAMING_SNAKE_CASE ): if need > available_resources[index]: __lowerCAmelCase : List[Any] = False break if execution: __lowerCAmelCase : Optional[int] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowerCAmelCase : Optional[int] = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(_SCREAMING_SNAKE_CASE ) # update available/freed resources stack __lowerCAmelCase : Optional[int] = np.array(_SCREAMING_SNAKE_CASE ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(_SCREAMING_SNAKE_CASE ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def __lowerCamelCase ( self ): print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(_SCREAMING_SNAKE_CASE ) + 1}" + ' '.join(f"{it:>8}" for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(_SCREAMING_SNAKE_CASE ) + 1}" + ' '.join(f"{it:>8}" for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(_SCREAMING_SNAKE_CASE ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(_SCREAMING_SNAKE_CASE ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
86
"""simple docstring""" from math import ceil def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = list(range(0 , snake_case__ ) ) lowerCamelCase = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowerCamelCase = [] for i in device_map_blocks: if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(snake_case__ ) # Missing blocks lowerCamelCase = [i for i in blocks if i not in device_map_blocks] lowerCamelCase = [i for i in device_map_blocks if i not in blocks] if len(snake_case__ ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(snake_case__ ) ) def a__ ( snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = list(range(snake_case__ ) ) lowerCamelCase = int(ceil(n_layers / len(snake_case__ ) ) ) lowerCamelCase = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )] return dict(zip(snake_case__ , snake_case__ ) )
291
0
from __future__ import annotations def lowercase_ ( _lowerCamelCase : list[int]): return len(set(_lowerCamelCase)) == len(_lowerCamelCase) if __name__ == "__main__": import doctest doctest.testmod()
87
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = seq_length lowerCamelCase = is_training lowerCamelCase = use_attention_mask lowerCamelCase = use_token_type_ids lowerCamelCase = use_labels lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = type_vocab_size lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = num_choices def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase = None if self.use_attention_mask: lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase = None if self.use_token_type_ids: lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerModelTester(self ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a ) lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a ) @require_flax class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase = model(_a )[0] lowerCamelCase = 50_000 lowerCamelCase = (1, 6, vocab_size) self.assertEqual(output.shape , _a ) lowerCamelCase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
291
0
from __future__ import annotations import string from itertools import cycle, product from pathlib import Path __lowerCAmelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) __lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] __lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS} __lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = "" __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 for keychar, cipherchar in zip(cycle(A_ ), A_ ): __magic_name__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(A_ ) return decoded def a__ ( A_ ): '''simple docstring''' __magic_name__ = [] for key in product(A_, repeat=3 ): __magic_name__ = try_key(A_, A_ ) if encoded is not None: possibles.append(A_ ) return possibles def a__ ( A_, A_ ): '''simple docstring''' return [possible for possible in possibles if common_word in possible.lower()] def a__ ( A_ = "p059_cipher.txt" ): '''simple docstring''' __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = Path(A_ ).parent.joinpath(A_ ).read_text(encoding="""utf-8""" ) __magic_name__ = [int(A_ ) for number in data.strip().split(""",""" )] __magic_name__ = filter_valid_chars(A_ ) for common_word in COMMON_WORDS: __magic_name__ = filter_common_word(A_, A_ ) if len(A_ ) == 1: break __magic_name__ = possibles[0] return sum(ord(A_ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
88
"""simple docstring""" from typing import Any def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list: _validation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Creates data structures and fill initial step lowerCamelCase = {} lowerCamelCase = {} for state in states_space: lowerCamelCase = observations_space[0] lowerCamelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case__ ) ): lowerCamelCase = observations_space[o] lowerCamelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state # Update probabilities and pointers dicts lowerCamelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase = arg_max # The final observation lowerCamelCase = observations_space[len(snake_case__ ) - 1] # argmax for given final observation lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state lowerCamelCase = arg_max # Process pointers backwards lowerCamelCase = last_state lowerCamelCase = [] for o in range(len(snake_case__ ) - 1 , -1 , -1 ): result.append(snake_case__ ) lowerCamelCase = pointers[previous, observations_space[o]] result.reverse() return result def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_not_empty( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) _validate_lists(snake_case__ , snake_case__ ) _validate_dicts( snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_list(snake_case__ , """observations_space""" ) _validate_list(snake_case__ , """states_space""" ) def a__ ( snake_case__ , snake_case__ ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a list' raise ValueError(snake_case__ ) else: for x in _object: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'{var_name} must be a list of strings' raise ValueError(snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ ) _validate_nested_dict(snake_case__ , """transition_probabilities""" ) _validate_nested_dict(snake_case__ , """emission_probabilities""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_dict(_object , snake_case__ , snake_case__ ) for x in _object.values(): _validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a dict' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ): lowerCamelCase = F'{var_name} all keys must be strings' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ): lowerCamelCase = """nested dictionary """ if nested else """""" lowerCamelCase = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
'''simple docstring''' import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available __lowerCAmelCase = logging.getLogger(__name__) @dataclass class __magic_name__ : lowerCAmelCase : str lowerCAmelCase : List[str] lowerCAmelCase : Optional[List[str]] @dataclass class __magic_name__ : lowerCAmelCase : List[int] lowerCAmelCase : List[int] lowerCAmelCase : Optional[List[int]] = None lowerCAmelCase : Optional[List[int]] = None class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : List[Any] = 'train' lowerCAmelCase : List[str] = 'dev' lowerCAmelCase : Optional[int] = 'test' class __magic_name__ : @staticmethod def __lowercase ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[Split, str] ): raise NotImplementedError @staticmethod def __lowercase ( _UpperCAmelCase : str ): raise NotImplementedError @staticmethod def __lowercase ( _UpperCAmelCase : List[InputExample] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : PreTrainedTokenizer ,_UpperCAmelCase : Dict=False ,_UpperCAmelCase : int="[CLS]" ,_UpperCAmelCase : List[Any]=1 ,_UpperCAmelCase : Optional[int]="[SEP]" ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int=False ,_UpperCAmelCase : Tuple=0 ,_UpperCAmelCase : Any=0 ,_UpperCAmelCase : int=-100 ,_UpperCAmelCase : List[Any]=0 ,_UpperCAmelCase : str=True ,): _a : str = {label: i for i, label in enumerate(_UpperCAmelCase )} _a : str = [] for ex_index, example in enumerate(_UpperCAmelCase ): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' ,_UpperCAmelCase ,len(_UpperCAmelCase ) ) _a : Optional[int] = [] _a : Union[str, Any] = [] for word, label in zip(example.words ,example.labels ): _a : str = tokenizer.tokenize(_UpperCAmelCase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_UpperCAmelCase ) > 0: tokens.extend(_UpperCAmelCase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_UpperCAmelCase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. _a : List[Any] = tokenizer.num_special_tokens_to_add() if len(_UpperCAmelCase ) > max_seq_length - special_tokens_count: _a : Optional[Any] = tokens[: (max_seq_length - special_tokens_count)] _a : Optional[int] = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] _a : str = [sequence_a_segment_id] * len(_UpperCAmelCase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: _a : List[Any] = [cls_token] + tokens _a : Any = [pad_token_label_id] + label_ids _a : List[str] = [cls_token_segment_id] + segment_ids _a : Union[str, Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. _a : Dict = [1 if mask_padding_with_zero else 0] * len(_UpperCAmelCase ) # Zero-pad up to the sequence length. _a : int = max_seq_length - len(_UpperCAmelCase ) if pad_on_left: _a : Optional[Any] = ([pad_token] * padding_length) + input_ids _a : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask _a : str = ([pad_token_segment_id] * padding_length) + segment_ids _a : Optional[int] = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length if ex_index < 5: logger.info('*** Example ***' ) logger.info('guid: %s' ,example.guid ) logger.info('tokens: %s' ,' '.join([str(_UpperCAmelCase ) for x in tokens] ) ) logger.info('input_ids: %s' ,' '.join([str(_UpperCAmelCase ) for x in input_ids] ) ) logger.info('input_mask: %s' ,' '.join([str(_UpperCAmelCase ) for x in input_mask] ) ) logger.info('segment_ids: %s' ,' '.join([str(_UpperCAmelCase ) for x in segment_ids] ) ) logger.info('label_ids: %s' ,' '.join([str(_UpperCAmelCase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: _a : Union[str, Any] = None features.append( InputFeatures( input_ids=_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,token_type_ids=_UpperCAmelCase ,label_ids=_UpperCAmelCase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : List[InputFeatures] lowerCAmelCase : int = nn.CrossEntropyLoss().ignore_index def __init__( self : int ,_UpperCAmelCase : TokenClassificationTask ,_UpperCAmelCase : str ,_UpperCAmelCase : PreTrainedTokenizer ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : List[Any]=False ,_UpperCAmelCase : Split = Split.train ,): # Load data features from cache or dataset file _a : str = os.path.join( _UpperCAmelCase ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(_UpperCAmelCase ) ) ,) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. _a : Tuple = cached_features_file + '.lock' with FileLock(_UpperCAmelCase ): if os.path.exists(_UpperCAmelCase ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) _a : int = torch.load(_UpperCAmelCase ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) _a : List[str] = token_classification_task.read_examples_from_file(_UpperCAmelCase ,_UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers _a : List[str] = token_classification_task.convert_examples_to_features( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_UpperCAmelCase ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) logger.info(F"""Saving features into cached file {cached_features_file}""" ) torch.save(self.features ,_UpperCAmelCase ) def __len__( self : Any ): return len(self.features ) def __getitem__( self : Tuple ,_UpperCAmelCase : Dict ): return self.features[i] if is_tf_available(): import tensorflow as tf class __magic_name__ : lowerCAmelCase : List[InputFeatures] lowerCAmelCase : int = -1_0_0 def __init__( self : str ,_UpperCAmelCase : TokenClassificationTask ,_UpperCAmelCase : str ,_UpperCAmelCase : PreTrainedTokenizer ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : int=False ,_UpperCAmelCase : Split = Split.train ,): _a : List[Any] = token_classification_task.read_examples_from_file(_UpperCAmelCase ,_UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers _a : Dict = token_classification_task.convert_examples_to_features( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_UpperCAmelCase ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: _a : Optional[Any] = tf.data.Dataset.from_generator( _UpperCAmelCase ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) ,( {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) ,) else: _a : str = tf.data.Dataset.from_generator( _UpperCAmelCase ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) ,( { 'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] ), 'token_type_ids': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) ,) def __lowercase ( self : str ): _a : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : Optional[Any] ): return len(self.features ) def __getitem__( self : str ,_UpperCAmelCase : int ): return self.features[i]
89
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger(__name__) def a__ ( snake_case__ ) -> Dict: lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" ) if "model" in sd.keys(): lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # pop unnecessary weights lowerCamelCase = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(snake_case__ ) lowerCamelCase = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowerCamelCase = sd.pop(snake_case__ ) lowerCamelCase = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowerCamelCase = sd[key] # We split QKV in separate Q,K,V lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" ) lowerCamelCase = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowerCamelCase , lowerCamelCase , lowerCamelCase = torch.split(snake_case__ , depth // 3 , dim=0 ) lowerCamelCase = q lowerCamelCase = k lowerCamelCase = v del sd[key] return sd @torch.no_grad() def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple: lowerCamelCase = load_checkpoint(snake_case__ ) if config is not None: lowerCamelCase = OPTConfig.from_pretrained(snake_case__ ) else: lowerCamelCase = OPTConfig() lowerCamelCase = OPTModel(snake_case__ ).half().eval() model.load_state_dict(snake_case__ ) # Check results Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") lowerCAmelCase : Optional[Any] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
291
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = KandinskyInpaintPipeline snake_case_ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] snake_case_ = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] snake_case_ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] snake_case_ = False @property def lowercase_ ( self ) -> Any: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> str: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Any: '''simple docstring''' return self.time_input_dim @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' return self.time_input_dim * 4 @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' return 100 @property def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def lowercase_ ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) __lowerCamelCase = MultilingualCLIP(lowerCamelCase__ ) __lowerCamelCase = text_encoder.eval() return text_encoder @property def lowercase_ ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase = UNetaDConditionModel(**lowerCamelCase__ ) return model @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase_ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_unet __lowerCamelCase = self.dummy_movq __lowerCamelCase = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase__ , ) __lowerCamelCase = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> List[str]: '''simple docstring''' __lowerCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __lowerCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCamelCase__ ) # create init_image __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ).resize((256, 256) ) # create mask __lowerCamelCase = np.ones((64, 64) , dtype=np.floataa ) __lowerCamelCase = 0 if str(lowerCamelCase__ ).startswith('mps' ): __lowerCamelCase = torch.manual_seed(lowerCamelCase__ ) else: __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __lowerCamelCase = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**lowerCamelCase__ ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) __lowerCamelCase = output.images __lowerCamelCase = pipe( **self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> List[str]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __lowerCamelCase = np.ones((768, 768) , dtype=np.floataa ) __lowerCamelCase = 0 __lowerCamelCase = 'a hat' __lowerCamelCase = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase__ ) __lowerCamelCase = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa ) __lowerCamelCase = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __lowerCamelCase , __lowerCamelCase = pipe_prior( lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __lowerCamelCase = pipeline( lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) __lowerCamelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
90
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = tempfile.mkdtemp() # fmt: off lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCamelCase = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } lowerCamelCase = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_a , _a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_tokenizer() lowerCamelCase = self.get_image_processor() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCamelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = image_processor(_a , return_tensors="""np""" ) lowerCamelCase = processor(images=_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = processor(text=_a ) lowerCamelCase = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase = processor.batch_decode(_a ) lowerCamelCase = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
291
0
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer UpperCAmelCase_ : int = """bart""" UpperCAmelCase_ : Dict = True @st.cache(allow_output_mutation=__a ) def _A () -> Union[str, Any]: """simple docstring""" if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) SCREAMING_SNAKE_CASE_ : Dict = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = qar_model.eval() else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = (None, None) if MODEL_TYPE == "bart": SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) SCREAMING_SNAKE_CASE_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) SCREAMING_SNAKE_CASE_ : List[str] = sas_model.eval() else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__a ) def _A () -> Optional[int]: """simple docstring""" if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE_ : int = faiss.StandardGpuResources() SCREAMING_SNAKE_CASE_ : int = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 1_28) , ) SCREAMING_SNAKE_CASE_ : str = faiss.IndexFlatIP(1_28 ) SCREAMING_SNAKE_CASE_ : Dict = faiss.index_cpu_to_gpu(__a , 1 , __a ) wikiaab_gpu_index_flat.add(__a ) # TODO fix for larger GPU else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = (None, None) SCREAMING_SNAKE_CASE_ : List[Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__a ) def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) SCREAMING_SNAKE_CASE_ : str = elia['''train_eli5'''] SCREAMING_SNAKE_CASE_ : Optional[int] = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 1_28) ) SCREAMING_SNAKE_CASE_ : str = faiss.IndexFlatIP(1_28 ) eli5_train_q_index.add(__a ) return (elia_train, eli5_train_q_index) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = load_indexes() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = load_models() UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = load_train_data() def _A (__a , __a=10 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = embed_questions_for_retrieval([question] , __a , __a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = eli5_train_q_index.search(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = [elia_train[int(__a )] for i in I[0]] return nn_examples def _A (__a , __a="wiki40b" , __a="dense" , __a=10 ) -> str: """simple docstring""" if source == "none": SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = query_qa_dense_index( __a , __a , __a , __a , __a , __a ) else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = query_es_index( __a , __a , index_name='''english_wiki40b_snippets_100w''' , n_results=__a , ) SCREAMING_SNAKE_CASE_ : str = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] SCREAMING_SNAKE_CASE_ : Optional[int] = '''question: {} context: {}'''.format(__a , __a ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __a : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __a : None), } ) def _A (__a , __a , __a , __a=64 , __a=2_56 , __a=False , __a=2 , __a=0.95 , __a=0.8 ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = qa_sas_generate( __a , __a , __a , num_answers=1 , num_beams=__a , min_len=__a , max_len=__a , do_sample=__a , temp=__a , top_p=__a , top_k=__a , max_input_length=10_24 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar UpperCAmelCase_ : Any = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" UpperCAmelCase_ : Optional[int] = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia UpperCAmelCase_ : Any = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) UpperCAmelCase_ : Optional[Any] = [ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] UpperCAmelCase_ : Optional[Any] = st.sidebar.checkbox("""Demo options""") if demo_options: UpperCAmelCase_ : Optional[Any] = st.sidebar.selectbox( """""", action_list, index=3, ) UpperCAmelCase_ : Tuple = action_list.index(action_st) UpperCAmelCase_ : Optional[Any] = st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) UpperCAmelCase_ : Union[str, Any] = show_type == """Show full text of passages""" else: UpperCAmelCase_ : int = 3 UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : Optional[Any] = st.sidebar.checkbox("""Retrieval options""") if retrieval_options: UpperCAmelCase_ : Optional[Any] = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) UpperCAmelCase_ : int = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) UpperCAmelCase_ : Dict = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: UpperCAmelCase_ : List[Any] = """wiki40b""" UpperCAmelCase_ : str = """dense""" UpperCAmelCase_ : Any = """beam""" UpperCAmelCase_ : Optional[Any] = 2 UpperCAmelCase_ : Optional[int] = 64 UpperCAmelCase_ : Any = 256 UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : Any = None UpperCAmelCase_ : Any = st.sidebar.checkbox("""Generation options""") if generate_options: UpperCAmelCase_ : int = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. """ st.sidebar.markdown(generate_info) UpperCAmelCase_ : Optional[int] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) UpperCAmelCase_ : Optional[Any] = st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) UpperCAmelCase_ : Optional[int] = st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": UpperCAmelCase_ : Union[str, Any] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: UpperCAmelCase_ : str = st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) UpperCAmelCase_ : str = st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) UpperCAmelCase_ : Union[str, Any] = None # start main text UpperCAmelCase_ : Optional[int] = [ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] UpperCAmelCase_ : Union[str, Any] = st.selectbox( """What would you like to ask? ---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": UpperCAmelCase_ : Optional[int] = st.text_input("""Enter your question here:""", """""") else: UpperCAmelCase_ : int = question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": UpperCAmelCase_ , UpperCAmelCase_ : str = make_support(question, source=wiki_source, method="""dense""", n_results=10) UpperCAmelCase_ , UpperCAmelCase_ : int = make_support(question, source=wiki_source, method="""sparse""", n_results=10) UpperCAmelCase_ : Optional[int] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] UpperCAmelCase_ : List[Any] = support_list[:10] UpperCAmelCase_ : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: UpperCAmelCase_ , UpperCAmelCase_ : Any = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: UpperCAmelCase_ , UpperCAmelCase_ : str = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): UpperCAmelCase_ : Optional[int] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) UpperCAmelCase_ : Optional[int] = res[1].strip() if sec_titles == "": UpperCAmelCase_ : Dict = """[{}]({})""".format(res[0], wiki_url) else: UpperCAmelCase_ : str = sec_titles.split(""" & """) UpperCAmelCase_ : Dict = """ & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: UpperCAmelCase_ : int = find_nearest_training(question) UpperCAmelCase_ : List[Any] = nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) UpperCAmelCase_ : str = [ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) UpperCAmelCase_ : Dict = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
91
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def a__ ( ) -> Union[str, Any]: lowerCamelCase = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def a__ ( ) -> List[str]: lowerCamelCase = parse_args() # Import training_script as a module. lowerCamelCase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCamelCase = script_fpath.stem lowerCamelCase = importlib.import_module(snake_case__ ) # Patch sys.argv lowerCamelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
291
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) UpperCamelCase__ = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ["""ViTFeatureExtractor"""] UpperCamelCase__ = ["""ViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """VIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTForImageClassification""", """ViTForMaskedImageModeling""", """ViTModel""", """ViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """TFViTForImageClassification""", """TFViTModel""", """TFViTPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """FlaxViTForImageClassification""", """FlaxViTModel""", """FlaxViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : List[str] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "sew-d" def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a=2 , _a=512 , _a=256 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ): """simple docstring""" super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowerCamelCase = hidden_size lowerCamelCase = feat_extract_norm lowerCamelCase = feat_extract_activation lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = conv_bias lowerCamelCase = num_conv_pos_embeddings lowerCamelCase = num_conv_pos_embedding_groups lowerCamelCase = len(self.conv_dim ) lowerCamelCase = num_hidden_layers lowerCamelCase = intermediate_size lowerCamelCase = squeeze_factor lowerCamelCase = max_position_embeddings lowerCamelCase = position_buckets lowerCamelCase = share_att_key lowerCamelCase = relative_attention lowerCamelCase = norm_rel_ebd lowerCamelCase = list(_a ) lowerCamelCase = hidden_act lowerCamelCase = num_attention_heads lowerCamelCase = hidden_dropout lowerCamelCase = attention_dropout lowerCamelCase = activation_dropout lowerCamelCase = feat_proj_dropout lowerCamelCase = final_dropout lowerCamelCase = layer_norm_eps lowerCamelCase = feature_layer_norm_eps lowerCamelCase = initializer_range lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase = apply_spec_augment lowerCamelCase = mask_time_prob lowerCamelCase = mask_time_length lowerCamelCase = mask_time_min_masks lowerCamelCase = mask_feature_prob lowerCamelCase = mask_feature_length lowerCamelCase = mask_feature_min_masks # ctc loss lowerCamelCase = ctc_loss_reduction lowerCamelCase = ctc_zero_infinity # sequence classification lowerCamelCase = use_weighted_layer_sum lowerCamelCase = classifier_proj_size @property def _lowerCAmelCase ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
291
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : str = { "configuration_xlm_roberta_xl": [ "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaXLConfig", "XLMRobertaXLOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaXLForCausalLM", "XLMRobertaXLForMaskedLM", "XLMRobertaXLForMultipleChoice", "XLMRobertaXLForQuestionAnswering", "XLMRobertaXLForSequenceClassification", "XLMRobertaXLForTokenClassification", "XLMRobertaXLModel", "XLMRobertaXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
93
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase : Any = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _lowerCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None , _a="warn" , ): """simple docstring""" lowerCamelCase = recall_score( _a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , ) return {"recall": float(_a ) if score.size == 1 else score}
291
0
from math import pow def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , ): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count a :List[Any] = int(pow(UpperCAmelCase_ , UpperCAmelCase_ ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n a , a :List[Any] = backtrack( UpperCAmelCase_ , UpperCAmelCase_ , current_number + 1 , UpperCAmelCase_ , UpperCAmelCase_ ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. a , a :List[str] = backtrack( UpperCAmelCase_ , UpperCAmelCase_ , current_number + 1 , UpperCAmelCase_ , UpperCAmelCase_ ) return current_sum, solutions_count def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): """simple docstring""" if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): raise ValueError( '''Invalid input\n''' '''needed_sum must be between 1 and 1000, power between 2 and 10.''' ) return backtrack(UpperCAmelCase_ , UpperCAmelCase_ , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
94
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = process lowerCamelCase = params def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" lowerCamelCase = self.dataset[i] lowerCamelCase = self.process(_a , **self.params ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" lowerCamelCase = loader lowerCamelCase = infer lowerCamelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase = None lowerCamelCase = loader_batch_size # Internal bookkeeping lowerCamelCase = None lowerCamelCase = None def __len__( self ): """simple docstring""" return len(self.loader ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_a , _a ): # Convert ModelOutput to tuple first lowerCamelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_a , _a ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowerCamelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase = self._loader_batch_data.__class__(_a ) self._loader_batch_index += 1 return result def _lowerCAmelCase ( self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase = next(self.iterator ) lowerCamelCase = self.infer(_a , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase = processed lowerCamelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" super().__init__(_a , _a , _a ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) lowerCamelCase = None return self def _lowerCAmelCase ( self ): """simple docstring""" if self.subiterator is None: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCamelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) lowerCamelCase = next(self.subiterator ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase = False lowerCamelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator while not is_last: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size lowerCamelCase = processed lowerCamelCase = 0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator else: lowerCamelCase = processed lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) return accumulator class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = key def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return self.dataset[i][self.key] class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = keya lowerCamelCase = keya def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
291
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig UpperCAmelCase : List[str] = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class __lowerCAmelCase ( UpperCamelCase__): _lowercase : List[str] = """albert""" def __init__( self , lowerCAmelCase__=3_0_0_0_0 , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1 , lowerCAmelCase__=6_4 , lowerCAmelCase__=1_6_3_8_4 , lowerCAmelCase__=1 , lowerCAmelCase__="gelu_new" , lowerCAmelCase__=0 , lowerCAmelCase__=0 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0.1 , lowerCAmelCase__="absolute" , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , **lowerCAmelCase__ , ) -> int: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) a__ : str =vocab_size a__ : Optional[int] =embedding_size a__ : Optional[Any] =hidden_size a__ : List[str] =num_hidden_layers a__ : Any =num_hidden_groups a__ : int =num_attention_heads a__ : int =inner_group_num a__ : List[str] =hidden_act a__ : Tuple =intermediate_size a__ : List[Any] =hidden_dropout_prob a__ : List[str] =attention_probs_dropout_prob a__ : str =max_position_embeddings a__ : str =type_vocab_size a__ : List[str] =initializer_range a__ : List[Any] =layer_norm_eps a__ : Optional[Any] =classifier_dropout_prob a__ : Tuple =position_embedding_type class __lowerCAmelCase ( UpperCamelCase__): @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": a__ : Any ={0: "batch", 1: "choice", 2: "sequence"} else: a__ : Dict ={0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
95
"""simple docstring""" def a__ ( snake_case__ ) -> bool: lowerCamelCase = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def a__ ( snake_case__ = 50_00 ) -> int: lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case__ )] for i, pentagonal_i in enumerate(snake_case__ ): for j in range(snake_case__ , len(snake_case__ ) ): lowerCamelCase = pentagonal_nums[j] lowerCamelCase = pentagonal_i + pentagonal_j lowerCamelCase = pentagonal_j - pentagonal_i if is_pentagonal(snake_case__ ) and is_pentagonal(snake_case__ ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
291
0
"""simple docstring""" import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase=1024 , lowercase=1024 , lowercase=3.6 ): _lowerCamelCase : Union[str, Any] = tokenizer _lowerCamelCase : Any = tokenizer.bos_token_id _lowerCamelCase : Optional[int] = dataset _lowerCamelCase : Optional[int] = seq_length _lowerCamelCase : Union[str, Any] = seq_length * chars_per_token * num_of_sequences def __iter__( self ): _lowerCamelCase : Optional[int] = iter(self.dataset ) _lowerCamelCase : Dict = True while more_examples: _lowerCamelCase, _lowerCamelCase : Dict = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowercase )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: _lowerCamelCase : Optional[Any] = False break _lowerCamelCase : Optional[Any] = tokenizer(lowercase , truncation=lowercase )['input_ids'] _lowerCamelCase : Any = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(lowercase ) , self.seq_length ): _lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length] if len(lowercase ) == self.seq_length: yield torch.tensor(lowercase ) def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = {'streaming': True} _lowerCamelCase : str = load_dataset(args.dataset_name , split='train' , **lowercase__ ) _lowerCamelCase : Dict = ConstantLengthDataset(lowercase__ , lowercase__ , seq_length=args.seq_length ) _lowerCamelCase : str = DataLoader(lowercase__ , batch_size=args.batch_size ) return eval_dataloader def _snake_case ( lowercase__ ): model.eval() _lowerCamelCase : Tuple = [] for step, batch in enumerate(lowercase__ ): with torch.no_grad(): _lowerCamelCase : str = model(lowercase__ , labels=lowercase__ ) _lowerCamelCase : Optional[Any] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(lowercase__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break _lowerCamelCase : str = torch.mean(torch.cat(lowercase__ ) ) try: _lowerCamelCase : Any = torch.exp(lowercase__ ) except OverflowError: _lowerCamelCase : int = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator lowercase__ = Accelerator() # Parse configuration lowercase__ = HfArgumentParser(EvaluationArguments) lowercase__ = parser.parse_args() set_seed(args.seed) # Logging lowercase__ = logging.getLogger(__name__) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) # Load model and tokenizer lowercase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowercase__ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowercase__ = create_dataloader(args) # Prepare everything with our `accelerator`. lowercase__ , lowercase__ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("""Evaluating and saving model after training""") lowercase__ , lowercase__ = evaluate(args) logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}")
96
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowerCAmelCase : Tuple = logging.get_logger(__name__) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: with open(snake_case__ , """rb""" ) as flax_state_f: lowerCamelCase = from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCamelCase = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) lowerCamelCase = """""" lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" ) lowerCamelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCamelCase = [] lowerCamelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): lowerCamelCase = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCamelCase = """.""".join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict lowerCamelCase = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor lowerCamelCase = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list lowerCamelCase = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' """ use it for predictions and inference.""" ) return pt_model
291
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''vocab.txt'''} __snake_case = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } __snake_case = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } __snake_case = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class lowercase ( A__ ): """simple docstring""" _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_INIT_CONFIGURATION _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ConvBertTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): UpperCamelCase__ :List[Any] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) UpperCamelCase__ :Any = do_lower_case UpperCamelCase__ :Optional[Any] = strip_accents UpperCamelCase__ :List[Any] = tokenize_chinese_chars UpperCamelCase__ :Any = normalizer_class(**UpperCamelCase_ ) UpperCamelCase__ :Tuple = do_lower_case def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ): '''simple docstring''' UpperCamelCase__ :Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :List[str] = [self.sep_token_id] UpperCamelCase__ :str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
97
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCAmelCase : int = None lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", }, """tokenizer_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""", }, } lowerCAmelCase : Optional[int] = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } lowerCAmelCase : Union[str, Any] = """▁""" # Segments (not really needed) lowerCAmelCase : str = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : Tuple = 2 lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : List[Any] = 4 class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = "left" __UpperCamelCase = XLNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ): """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) lowerCamelCase = 3 lowerCamelCase = do_lower_case lowerCamelCase = remove_space lowerCamelCase = keep_accents lowerCamelCase = vocab_file lowerCamelCase = False if not self.vocab_file else True def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
291
0
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging lowerCAmelCase__ : List[Any] = logging.get_logger(__name__) class snake_case ( __UpperCAmelCase ): """simple docstring""" def __init__( self : List[str] ,lowerCamelCase__ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ): super().__init__() UpperCAmelCase__ = nn.ModuleList(lowerCamelCase__ ) def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Union[torch.Tensor, float, int] ,lowerCamelCase__ : torch.Tensor ,lowerCamelCase__ : List[torch.tensor] ,lowerCamelCase__ : List[float] ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[Dict[str, Any]] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ,): for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase__ ,lowerCamelCase__ ,self.nets ) ): UpperCAmelCase__ , UpperCAmelCase__ = controlnet( lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,) # merge samples if i == 0: UpperCAmelCase__ , UpperCAmelCase__ = down_samples, mid_sample else: UpperCAmelCase__ = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(lowerCamelCase__ ,lowerCamelCase__ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Union[str, os.PathLike] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Callable = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[str] = None ,): UpperCAmelCase__ = 0 UpperCAmelCase__ = save_directory for controlnet in self.nets: controlnet.save_pretrained( lowerCamelCase__ ,is_main_process=lowerCamelCase__ ,save_function=lowerCamelCase__ ,safe_serialization=lowerCamelCase__ ,variant=lowerCamelCase__ ,) idx += 1 UpperCAmelCase__ = model_path_to_save + f'''_{idx}''' @classmethod def __lowerCAmelCase ( cls : str ,lowerCamelCase__ : Optional[Union[str, os.PathLike]] ,**lowerCamelCase__ : str ): UpperCAmelCase__ = 0 UpperCAmelCase__ = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... UpperCAmelCase__ = pretrained_model_path while os.path.isdir(lowerCamelCase__ ): UpperCAmelCase__ = ControlNetModel.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ ) controlnets.append(lowerCamelCase__ ) idx += 1 UpperCAmelCase__ = pretrained_model_path + f'''_{idx}''' logger.info(f'''{len(lowerCamelCase__ )} controlnets loaded from {pretrained_model_path}.''' ) if len(lowerCamelCase__ ) == 0: raise ValueError( f'''No ControlNets found under {os.path.dirname(lowerCamelCase__ )}. Expected at least {pretrained_model_path + "_0"}.''' ) return cls(lowerCamelCase__ )
98
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __magic_name__ ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" lowerCamelCase = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) lowerCamelCase = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) lowerCamelCase = 3 lowerCamelCase = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) lowerCamelCase = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) lowerCamelCase = generator.model.config.eos_token_id lowerCamelCase = """<pad>""" lowerCamelCase = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
291
0
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter lowercase : Union[str, Any] = True except ImportError: lowercase : List[str] = False lowercase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def A_ ( A__ ) -> Dict: return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class A__ ( __UpperCAmelCase ): """simple docstring""" @staticmethod def __lowercase ( lowercase) -> Union[str, Any]: '''simple docstring''' a__ : Dict = parser.add_parser('add-new-model') add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.') add_new_model_parser.add_argument('--testing_file' , type=lowercase , help='Configuration file on which to run.') add_new_model_parser.add_argument( '--path' , type=lowercase , help='Path to cookiecutter. Should only be used for testing purposes.') add_new_model_parser.set_defaults(func=lowercase) def __init__( self , lowercase , lowercase , lowercase=None , *lowercase) -> Union[str, Any]: '''simple docstring''' a__ : Optional[int] = testing a__ : int = testing_file a__ : str = path def __lowercase ( self) -> Optional[int]: '''simple docstring''' warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.') if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n') # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory a__ : List[Any] = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(lowercase) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.') a__ : Tuple = ( Path(lowercase).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent ) a__ : Optional[Any] = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(lowercase)) else: with open(self._testing_file , 'r') as configuration_file: a__ : Any = json.load(lowercase) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path) , no_input=lowercase , extra_context=lowercase , ) a__ : str = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' , 'r') as configuration_file: a__ : List[str] = json.load(lowercase) a__ : List[Any] = configuration['lowercase_modelname'] a__ : List[str] = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F'{directory}/configuration.json') a__ : List[Any] = 'PyTorch' in generate_tensorflow_pytorch_and_flax a__ : Union[str, Any] = 'TensorFlow' in generate_tensorflow_pytorch_and_flax a__ : List[Any] = 'Flax' in generate_tensorflow_pytorch_and_flax a__ : List[str] = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(lowercase , exist_ok=lowercase) os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=lowercase) # Tests require submodules as they have parent imports with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , 'w'): pass shutil.move( F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , ) shutil.move( F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , ) def remove_copy_lines(lowercase): with open(lowercase , 'r') as f: a__ : Optional[int] = f.readlines() with open(lowercase , 'w') as f: for line in lines: if "# Copied from transformers." not in line: f.write(lowercase) if output_pytorch: if not self._testing: remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py') shutil.move( F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , ) shutil.move( F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , ) else: os.remove(F'{directory}/modeling_{lowercase_model_name}.py') os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py') if output_tensorflow: if not self._testing: remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py') shutil.move( F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , ) shutil.move( F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , ) else: os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py') os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py') if output_flax: if not self._testing: remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py') shutil.move( F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , ) shutil.move( F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , ) else: os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py') os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py') shutil.move( F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , ) shutil.move( F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , ) shutil.move( F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(lowercase , lowercase , lowercase): # Create temp file a__ , a__ : Dict = mkstemp() a__ : Dict = False with fdopen(lowercase , 'w') as new_file: with open(lowercase) as old_file: for line in old_file: new_file.write(lowercase) if line_to_copy_below in line: a__ : int = True for line_to_copy in lines_to_copy: new_file.write(lowercase) if not line_found: raise ValueError(F'Line {line_to_copy_below} was not found in file.') # Copy the file permissions from the old file to the new file copymode(lowercase , lowercase) # Remove original file remove(lowercase) # Move new file move(lowercase , lowercase) def skip_units(lowercase): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(lowercase): with open(lowercase) as datafile: a__ : Any = [] a__ : List[Any] = False a__ : str = False for line in datafile: if "# To replace in: " in line and "##" not in line: a__ : Dict = line.split('"')[1] a__ : Tuple = skip_units(lowercase) elif "# Below: " in line and "##" not in line: a__ : Optional[int] = line.split('"')[1] a__ : Union[str, Any] = skip_units(lowercase) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(lowercase , lowercase , lowercase) a__ : Optional[Any] = [] elif "# Replace with" in line and "##" not in line: a__ : List[Any] = [] elif "##" not in line: lines_to_copy.append(lowercase) remove(lowercase) replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py') os.rmdir(lowercase)
99
"""simple docstring""" def a__ ( snake_case__ , snake_case__ = False ) -> str: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected string as input, found {type(snake_case__ )}' raise ValueError(snake_case__ ) if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected boolean as use_pascal parameter, found {type(snake_case__ )}' raise ValueError(snake_case__ ) lowerCamelCase = input_str.split("""_""" ) lowerCamelCase = 0 if use_pascal else 1 lowerCamelCase = words[start_index:] lowerCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize] lowerCamelCase = """""" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __magic_name__ = logging.get_logger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Dict = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **lowerCAmelCase__): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __SCREAMING_SNAKE_CASE = deprecated_arg[3:] setattr(self , lowerCAmelCase__ , not kwargs.pop(lowerCAmelCase__)) logger.warning( f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}") __SCREAMING_SNAKE_CASE = kwargs.pop("""torchscript""" , self.torchscript) __SCREAMING_SNAKE_CASE = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics) __SCREAMING_SNAKE_CASE = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level) super().__init__(**lowerCAmelCase__) __lowercase : bool = field(default=__a , metadata={'''help''': '''Trace the models using torchscript'''} ) __lowercase : bool = field(default=__a , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} ) __lowercase : str = field( default='''O1''' , metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) } , ) @cached_property def snake_case_ ( self): requires_backends(self , ["""torch"""]) logger.info("""PyTorch: setting up devices""") if not self.cuda: __SCREAMING_SNAKE_CASE = torch.device("""cpu""") __SCREAMING_SNAKE_CASE = 0 elif is_torch_tpu_available(): __SCREAMING_SNAKE_CASE = xm.xla_device() __SCREAMING_SNAKE_CASE = 0 else: __SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""") __SCREAMING_SNAKE_CASE = torch.cuda.device_count() return device, n_gpu @property def snake_case_ ( self): return is_torch_tpu_available() and self.tpu @property def snake_case_ ( self): requires_backends(self , ["""torch"""]) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def snake_case_ ( self): requires_backends(self , ["""torch"""]) return self._setup_devices[0] @property def snake_case_ ( self): requires_backends(self , ["""torch"""]) return self._setup_devices[1] @property def snake_case_ ( self): return self.n_gpu > 0
100
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase : int = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ): """simple docstring""" super().__init__(**_a ) lowerCamelCase = size if size is not None else {"""shortest_edge""": 256} lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = resample lowerCamelCase = do_center_crop lowerCamelCase = crop_size lowerCamelCase = do_rescale lowerCamelCase = rescale_factor lowerCamelCase = do_normalize lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) lowerCamelCase = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a ): """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a , _a = None , **_a , ): """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ): """simple docstring""" lowerCamelCase = do_resize if do_resize is not None else self.do_resize lowerCamelCase = size if size is not None else self.size lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = resample if resample is not None else self.resample lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase = crop_size if crop_size is not None else self.crop_size lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase = image_mean if image_mean is not None else self.image_mean lowerCamelCase = image_std if image_std is not None else self.image_std lowerCamelCase = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase = [to_numpy_array(_a ) for image in images] if do_resize: lowerCamelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: lowerCamelCase = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: lowerCamelCase = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCamelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCamelCase = [to_channel_dimension_format(_a , _a ) for image in images] lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a ) def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_a ): lowerCamelCase = target_sizes.numpy() lowerCamelCase = [] for idx in range(len(_a ) ): lowerCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a ) lowerCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: lowerCamelCase = logits.argmax(dim=1 ) lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
291
0
from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def UpperCamelCase ( ): '''simple docstring''' lowercase = [randint(-1000 , 1000 ) for i in range(10 )] lowercase = randint(-5000 , 5000 ) return (arr, r) lowercase__ :Dict = make_dataset() def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' for triplet in permutations(lowerCAmelCase__ , 3 ): if sum(lowerCAmelCase__ ) == target: return tuple(sorted(lowerCAmelCase__ ) ) return (0, 0, 0) def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' arr.sort() lowercase = len(lowerCAmelCase__ ) for i in range(n - 1 ): lowercase , lowercase = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def UpperCamelCase ( ): '''simple docstring''' lowercase = ''' from __main__ import dataset, triplet_sum1, triplet_sum2 ''' lowercase = ''' triplet_sum1(*dataset) ''' lowercase = ''' triplet_sum2(*dataset) ''' lowercase = repeat(setup=lowerCAmelCase__ , stmt=lowerCAmelCase__ , repeat=5 , number=1_0000 ) lowercase = repeat(setup=lowerCAmelCase__ , stmt=lowerCAmelCase__ , repeat=5 , number=1_0000 ) return (min(lowerCAmelCase__ ), min(lowerCAmelCase__ )) if __name__ == "__main__": from doctest import testmod testmod() lowercase__ :str = solution_times() print(F'The time for naive implementation is {times[0]}.') print(F'The time for optimized implementation is {times[1]}.')
101
"""simple docstring""" import operator as op lowerCAmelCase : Dict = """scaler.pt""" lowerCAmelCase : Tuple = """pytorch_model""" lowerCAmelCase : Union[str, Any] = """random_states""" lowerCAmelCase : Union[str, Any] = """optimizer""" lowerCAmelCase : Dict = """scheduler""" lowerCAmelCase : int = """pytorch_model.bin""" lowerCAmelCase : str = """pytorch_model.bin.index.json""" lowerCAmelCase : Union[str, Any] = """model.safetensors""" lowerCAmelCase : List[Any] = """model.safetensors.index.json""" lowerCAmelCase : List[Any] = """1.10.2""" lowerCAmelCase : Any = """py38""" lowerCAmelCase : Optional[int] = """4.17.0""" lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""] lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""] lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""] lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""] lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""] lowerCAmelCase : Any = """2.0.1""" lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""] lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""] lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowerCAmelCase : Union[str, Any] = [ """nnodes""", """nproc_per_node""", """rdzv_backend""", """rdzv_endpoint""", """rdzv_id""", """rdzv_conf""", """standalone""", """max_restarts""", """monitor_interval""", """start_method""", """role""", """module""", """m""", """no_python""", """run_path""", """log_dir""", """r""", """redirects""", """t""", """tee""", """node_rank""", """master_addr""", """master_port""", ] lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""] lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
291
0
"""simple docstring""" from statistics import mean import numpy as np def lowercase ( _snake_case : list , _snake_case : list , _snake_case : list , _snake_case : int ) ->list: """simple docstring""" __snake_case : List[str] = 0 # Number of processes finished __snake_case : List[str] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. __snake_case : Optional[int] = [0] * no_of_process # List to include calculation results __snake_case : Tuple = [0] * no_of_process # Sort by arrival time. __snake_case : int = [burst_time[i] for i in np.argsort(_snake_case )] __snake_case : str = [process_name[i] for i in np.argsort(_snake_case )] arrival_time.sort() while no_of_process > finished_process_count: __snake_case : List[str] = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: __snake_case : List[Any] = arrival_time[i] __snake_case : Union[str, Any] = 0 # Index showing the location of the process being performed __snake_case : Dict = 0 # Saves the current response ratio. __snake_case : List[Any] = 0 for i in range(0 , _snake_case ): if finished_process[i] == 0 and arrival_time[i] <= current_time: __snake_case : int = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: __snake_case : List[str] = temp __snake_case : Any = i # Calculate the turn around time __snake_case : int = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. __snake_case : Union[str, Any] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowercase ( _snake_case : list , _snake_case : list , _snake_case : list , _snake_case : int ) ->list: """simple docstring""" __snake_case : Optional[int] = [0] * no_of_process for i in range(0 , _snake_case ): __snake_case : int = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = 5 SCREAMING_SNAKE_CASE : Dict = ["""A""", """B""", """C""", """D""", """E"""] SCREAMING_SNAKE_CASE : List[str] = [1, 2, 3, 4, 5] SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4, 5] SCREAMING_SNAKE_CASE : List[Any] = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) SCREAMING_SNAKE_CASE : Union[str, Any] = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""") for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
102
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = image_size lowerCamelCase = patch_size lowerCamelCase = num_channels lowerCamelCase = is_training lowerCamelCase = use_labels lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase = (image_size // patch_size) ** 2 lowerCamelCase = num_patches + 1 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase = None if self.use_labels: lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ): """simple docstring""" return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = ViTMSNModel(config=_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = self.type_sequence_label_size lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a , labels=_a ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase = 1 lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ViTMSNModelTester(self ) lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def _lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def _lowerCAmelCase ( self ): """simple docstring""" pass def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a , nn.Linear ) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase = [*signature.parameters.keys()] lowerCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase = ViTMSNModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def a__ ( ) -> Any: lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def _lowerCAmelCase ( self ): """simple docstring""" torch.manual_seed(2 ) lowerCamelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a ) lowerCamelCase = self.default_image_processor lowerCamelCase = prepare_img() lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" ).to(_a ) # forward pass with torch.no_grad(): lowerCamelCase = model(**_a ) # verify the logits lowerCamelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCamelCase = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
291
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : List[Any] = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
103
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]: lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]: if split_mlp_wi: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] lowerCamelCase = (wi_a, wi_a) else: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple: return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def a__ ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ) -> Dict: lowerCamelCase = traverse_util.flatten_dict(variables["""target"""] ) lowerCamelCase = {"""/""".join(snake_case__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCamelCase = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , snake_case__ ) lowerCamelCase = collections.OrderedDict() # Shared embeddings. lowerCamelCase = old["""token_embedder/embedding"""] # Encoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , snake_case__ , """encoder""" ).T lowerCamelCase = old["""encoder/encoder_norm/scale"""] if not scalable_attention: lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """encoder""" ).T lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (Cross Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 2 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T lowerCamelCase = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCamelCase = old["""decoder/logits_dense/kernel"""].T return new def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCamelCase = state_dict["""shared.weight"""] return state_dict def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = checkpoints.load_tax_checkpoint(snake_case__ ) lowerCamelCase = convert_tax_to_pytorch( snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ ) lowerCamelCase = make_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ , strict=snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ) -> str: lowerCamelCase = MTaConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowerCamelCase = UMTaEncoderModel(snake_case__ ) else: lowerCamelCase = UMTaForConditionalGeneration(snake_case__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(snake_case__ ) # Verify that we can load the checkpoint. model.from_pretrained(snake_case__ ) print("""Done""" ) if __name__ == "__main__": lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) lowerCAmelCase : int = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
291
0
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _A ( A__ ): """simple docstring""" __lowercase = filter(lambda A__ : p.requires_grad , model.parameters() ) __lowercase = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase__ = logging.getLogger(__name__) def _A ( A__ , A__ ): """simple docstring""" if metric == "rouge2": __lowercase = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": __lowercase = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": __lowercase = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" ''' function.''' ) __lowercase = ModelCheckpoint( dirpath=A__ , filename=A__ , monitor=F"val_{metric}" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _A ( A__ , A__ ): """simple docstring""" return EarlyStopping( monitor=F"val_{metric}" , mode='''min''' if '''loss''' in metric else '''max''' , patience=A__ , verbose=A__ , ) class lowercase_ (pl.Callback ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : int ): __lowercase = {F"lr_group_{i}": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowercase__ ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : pl.Trainer ,lowercase__ : pl.LightningModule ,lowercase__ : str ,lowercase__ : Any=True ): logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" ) __lowercase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results __lowercase = Path(pl_module.hparams.output_dir ) if type_path == "test": __lowercase = od / '''test_results.txt''' __lowercase = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. __lowercase = od / F"{type_path}_results/{trainer.global_step:05d}.txt" __lowercase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=lowercase__ ) generations_file.parent.mkdir(exist_ok=lowercase__ ) with open(lowercase__ ,'''a+''' ) as writer: for key in sorted(lowercase__ ): if key in ["log", "progress_bar", "preds"]: continue __lowercase = metrics[key] if isinstance(lowercase__ ,torch.Tensor ): __lowercase = val.item() __lowercase = F"{key}: {val:.6f}\n" writer.write(lowercase__ ) if not save_generations: return if "preds" in metrics: __lowercase = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(lowercase__ ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : Dict ): try: __lowercase = pl_module.model.model.num_parameters() except AttributeError: __lowercase = pl_module.model.num_parameters() __lowercase = count_trainable_parameters(lowercase__ ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : pl.Trainer ,lowercase__ : pl.LightningModule ): save_json(pl_module.metrics ,pl_module.metrics_save_path ) return self._write_logs(lowercase__ ,lowercase__ ,'''test''' ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : pl.Trainer ,lowercase__ : Optional[int] ): save_json(pl_module.metrics ,pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
104
"""simple docstring""" from __future__ import annotations def a__ ( snake_case__ , snake_case__ ) -> bool: if len(snake_case__ ) == 0: return False lowerCamelCase = len(snake_case__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , snake_case__ ) else: return binary_search(a_list[midpoint + 1 :] , snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip() lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")] lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip()) lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """ print(F"""{target} was {not_str}found in {sequence}""")
291
0
"""simple docstring""" import logging from transformers import PretrainedConfig a : Tuple = logging.getLogger(__name__) a : Dict = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __UpperCamelCase ( a__ ): lowerCamelCase : Optional[Any] ="""bertabs""" def __init__( self , lowerCAmelCase__=3_0522 , lowerCAmelCase__=512 , lowerCAmelCase__=6 , lowerCAmelCase__=512 , lowerCAmelCase__=8 , lowerCAmelCase__=512 , lowerCAmelCase__=0.2 , lowerCAmelCase__=6 , lowerCAmelCase__=768 , lowerCAmelCase__=8 , lowerCAmelCase__=2048 , lowerCAmelCase__=0.2 , **lowerCAmelCase__ , ) -> int: super().__init__(**lowerCAmelCase__ ) a : Dict = vocab_size a : str = max_pos a : str = enc_layers a : int = enc_hidden_size a : Tuple = enc_heads a : Dict = enc_ff_size a : List[Any] = enc_dropout a : str = dec_layers a : Dict = dec_hidden_size a : Union[str, Any] = dec_heads a : Union[str, Any] = dec_ff_size a : List[Any] = dec_dropout
105
"""simple docstring""" def a__ ( snake_case__ ) -> list: if len(snake_case__ ) < 2: return collection def circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) -> bool: lowerCamelCase = False if low == high: return swapped lowerCamelCase = low lowerCamelCase = high while left < right: if collection[left] > collection[right]: lowerCamelCase , lowerCamelCase = ( collection[right], collection[left], ) lowerCamelCase = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowerCamelCase , lowerCamelCase = ( collection[right + 1], collection[left], ) lowerCamelCase = True lowerCamelCase = low + int((high - low) / 2 ) lowerCamelCase = circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase = circle_sort_util(snake_case__ , mid + 1 , snake_case__ ) return swapped or left_swap or right_swap lowerCamelCase = True while is_not_sorted is True: lowerCamelCase = circle_sort_util(snake_case__ , 0 , len(snake_case__ ) - 1 ) return collection if __name__ == "__main__": lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(""",""")] print(circle_sort(unsorted))
291
0
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = ["image_processor", "tokenizer"] lowercase__ = "ViltImageProcessor" lowercase__ = ("BertTokenizer", "BertTokenizerFast") def __init__( self : List[Any] ,lowercase_ : int=None ,lowercase_ : int=None ,**lowercase_ : Union[str, Any] ): lowerCAmelCase__ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' ,lowercase_ ,) lowerCAmelCase__ : Optional[Any] = kwargs.pop('''feature_extractor''' ) lowerCAmelCase__ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowercase_ ,lowercase_ ) lowerCAmelCase__ : Optional[Any] = self.image_processor def __call__( self : Tuple ,lowercase_ : Tuple ,lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowercase_ : bool = True ,lowercase_ : Union[bool, str, PaddingStrategy] = False ,lowercase_ : Union[bool, str, TruncationStrategy] = None ,lowercase_ : Optional[int] = None ,lowercase_ : int = 0 ,lowercase_ : Optional[int] = None ,lowercase_ : Optional[bool] = None ,lowercase_ : Optional[bool] = None ,lowercase_ : bool = False ,lowercase_ : bool = False ,lowercase_ : bool = False ,lowercase_ : bool = False ,lowercase_ : bool = True ,lowercase_ : Optional[Union[str, TensorType]] = None ,**lowercase_ : List[str] ,): lowerCAmelCase__ : List[Any] = self.tokenizer( text=lowercase_ ,add_special_tokens=lowercase_ ,padding=lowercase_ ,truncation=lowercase_ ,max_length=lowercase_ ,stride=lowercase_ ,pad_to_multiple_of=lowercase_ ,return_token_type_ids=lowercase_ ,return_attention_mask=lowercase_ ,return_overflowing_tokens=lowercase_ ,return_special_tokens_mask=lowercase_ ,return_offsets_mapping=lowercase_ ,return_length=lowercase_ ,verbose=lowercase_ ,return_tensors=lowercase_ ,**lowercase_ ,) # add pixel_values + pixel_mask lowerCAmelCase__ : Optional[Any] = self.image_processor(lowercase_ ,return_tensors=lowercase_ ) encoding.update(lowercase_ ) return encoding def __lowerCAmelCase ( self : Tuple ,*lowercase_ : Any ,**lowercase_ : List[str] ): return self.tokenizer.batch_decode(*lowercase_ ,**lowercase_ ) def __lowerCAmelCase ( self : Tuple ,*lowercase_ : str ,**lowercase_ : Tuple ): return self.tokenizer.decode(*lowercase_ ,**lowercase_ ) @property def __lowerCAmelCase ( self : int ): lowerCAmelCase__ : Tuple = self.tokenizer.model_input_names lowerCAmelCase__ : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __lowerCAmelCase ( self : Union[str, Any] ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowercase_ ,) return self.image_processor_class @property def __lowerCAmelCase ( self : int ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowercase_ ,) return self.image_processor
106
"""simple docstring""" from collections.abc import Generator def a__ ( ) -> Generator[int, None, None]: lowerCamelCase , lowerCamelCase = 0, 1 while True: lowerCamelCase , lowerCamelCase = b, a + b yield b def a__ ( snake_case__ = 10_00 ) -> int: lowerCamelCase = 1 lowerCamelCase = fibonacci_generator() while len(str(next(snake_case__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
291
0
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __lowerCAmelCase : str = Lock() def __magic_name__ ( A : Optional[Any], A : Any, A : Union[str, Any], A : List[str], A : int, A : Optional[int], A : Dict ): '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0, 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() a = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left a = min(A, A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() a = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right a = max(A, A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A : int ): '''simple docstring''' a = [] a = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop a = Pipe() a = Pipe() process_array_.append( Process( target=A, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) ) a = temp_rs a = temp_rr for i in range(1, len(A ) - 1 ): a = Pipe() a = Pipe() process_array_.append( Process( target=A, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) ) a = temp_rs a = temp_rr process_array_.append( Process( target=A, args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ), ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0, len(A ) ): a = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ): '''simple docstring''' a = list(range(10, 0, -1 ) ) print("Initial List" ) print(*A ) a = odd_even_transposition(A ) print("Sorted List\n" ) print(*A ) if __name__ == "__main__": main()
107
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase : List[str] = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ): """simple docstring""" super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) lowerCamelCase = spectrogram_length lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = feature_size // self.patch_size[1] lowerCamelCase = n_fft lowerCamelCase = sampling_rate // hop_length_to_sampling_rate lowerCamelCase = sampling_rate lowerCamelCase = padding_value lowerCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) lowerCamelCase = log_spec[:, :-1] lowerCamelCase = log_spec - 20.0 lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) lowerCamelCase = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCamelCase = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase = np.array(_a ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase = padded_audio_features * self.padding_value for i in range(len(_a ) ): lowerCamelCase = audio_features[i] lowerCamelCase = feature # return as BatchFeature if return_attention_mask: lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase = {"""audio_values""": padded_audio_features} lowerCamelCase = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
291
0
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase__ = get_tests_dir('''fixtures''') class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = mock.Mock() lowerCAmelCase : List[str] = 500 lowerCAmelCase : List[str] = {} lowerCAmelCase : Optional[int] = HTTPError lowerCAmelCase : Tuple = {} # Download this model to make sure it's in the cache. lowerCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head: lowerCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCAmelCase : Optional[Any] = TOKEN HfFolder.save_token(snake_case__ ) @classmethod def lowercase__ ( cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained(snake_case__ ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( snake_case__ , repo_id="test-feature-extractor" , push_to_hub=snake_case__ , use_auth_token=self._token ) lowerCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(snake_case__ ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) lowerCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( snake_case__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=snake_case__ , use_auth_token=self._token ) lowerCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) ) def lowercase__ ( self ): """simple docstring""" CustomFeatureExtractor.register_for_auto_class() lowerCAmelCase : Tuple = CustomFeatureExtractor.from_pretrained(snake_case__ ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) lowerCAmelCase : str = AutoFeatureExtractor.from_pretrained( f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=snake_case__ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
108
"""simple docstring""" from math import ceil def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = list(range(0 , snake_case__ ) ) lowerCamelCase = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowerCamelCase = [] for i in device_map_blocks: if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(snake_case__ ) # Missing blocks lowerCamelCase = [i for i in blocks if i not in device_map_blocks] lowerCamelCase = [i for i in device_map_blocks if i not in blocks] if len(snake_case__ ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(snake_case__ ) ) def a__ ( snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = list(range(snake_case__ ) ) lowerCamelCase = int(ceil(n_layers / len(snake_case__ ) ) ) lowerCamelCase = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )] return dict(zip(snake_case__ , snake_case__ ) )
291
0
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() A: int = logging.get_logger(__name__) A: Union[str, Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } A: Optional[int] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def _snake_case ( UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ): for attribute in key.split(""".""" ): UpperCAmelCase : Optional[Any] = getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: UpperCAmelCase : Optional[int] = getattr(UpperCamelCase , UpperCamelCase ).shape else: UpperCAmelCase : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": UpperCAmelCase : str = value elif weight_type == "weight_g": UpperCAmelCase : int = value elif weight_type == "weight_v": UpperCAmelCase : Optional[int] = value elif weight_type == "bias": UpperCAmelCase : Optional[int] = value else: UpperCAmelCase : Optional[Any] = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _snake_case ( UpperCamelCase : Tuple , UpperCamelCase : Tuple ): UpperCAmelCase : Tuple = [] UpperCAmelCase : List[Any] = fairseq_model.state_dict() UpperCAmelCase : Optional[int] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase : str = False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase : Dict = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase : int = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase : int = True if "*" in mapped_key: UpperCAmelCase : List[Any] = name.split(UpperCamelCase )[0].split(""".""" )[-2] UpperCAmelCase : Tuple = mapped_key.replace("""*""" , UpperCamelCase ) if "weight_g" in name: UpperCAmelCase : Optional[Any] = """weight_g""" elif "weight_v" in name: UpperCAmelCase : List[str] = """weight_v""" elif "bias" in name: UpperCAmelCase : Any = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase : Optional[int] = """weight""" else: UpperCAmelCase : Dict = None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ): UpperCAmelCase : str = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase : Any = name.split(""".""" ) UpperCAmelCase : str = int(items[0] ) UpperCAmelCase : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) UpperCAmelCase : List[Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) UpperCAmelCase : List[Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." ) UpperCAmelCase : int = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) UpperCAmelCase : Dict = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(UpperCamelCase ) @torch.no_grad() def _snake_case ( UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Union[str, Any]=True ): if config_path is not None: UpperCAmelCase : Optional[Any] = UniSpeechSatConfig.from_pretrained(UpperCamelCase ) else: UpperCAmelCase : Optional[Any] = UniSpeechSatConfig() UpperCAmelCase : str = """""" if is_finetuned: UpperCAmelCase : int = UniSpeechSatForCTC(UpperCamelCase ) else: UpperCAmelCase : Optional[Any] = UniSpeechSatForPreTraining(UpperCamelCase ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) UpperCAmelCase : int = model[0].eval() recursively_load_weights(UpperCamelCase , UpperCamelCase ) hf_wavavec.save_pretrained(UpperCamelCase ) if __name__ == "__main__": A: List[str] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) A: Dict = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
109
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = seq_length lowerCamelCase = is_training lowerCamelCase = use_attention_mask lowerCamelCase = use_token_type_ids lowerCamelCase = use_labels lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = type_vocab_size lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = num_choices def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase = None if self.use_attention_mask: lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase = None if self.use_token_type_ids: lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerModelTester(self ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a ) lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a ) @require_flax class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase = model(_a )[0] lowerCamelCase = 50_000 lowerCamelCase = (1, 6, vocab_size) self.assertEqual(output.shape , _a ) lowerCamelCase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
291
0
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( UpperCAmelCase__ ): __lowerCamelCase : Tuple = ["image_processor", "tokenizer"] __lowerCamelCase : Dict = "LayoutLMv2ImageProcessor" __lowerCamelCase : List[str] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Optional[int]: if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) _lowerCAmelCase = kwargs.pop("feature_extractor" ) _lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> int: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes " "if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." ) # first, apply the image processor _lowerCAmelCase = self.image_processor(images=_a , return_tensors=_a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_a , _a ): _lowerCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension) _lowerCAmelCase = features["words"] _lowerCAmelCase = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel values _lowerCAmelCase = features.pop("pixel_values" ) if return_overflowing_tokens is True: _lowerCAmelCase = self.get_overflowing_images(_a , encoded_inputs["overflow_to_sample_mapping"] ) _lowerCAmelCase = images return encoded_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_a ) != len(_a ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f''' {len(_a )} and {len(_a )}''' ) return images_with_overflow def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: return self.tokenizer.batch_decode(*_a , **_a ) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: return self.tokenizer.decode(*_a , **_a ) @property def _snake_case ( self ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "image"] @property def _snake_case ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def _snake_case ( self ) -> List[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
158
"""simple docstring""" from typing import Any def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list: _validation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Creates data structures and fill initial step lowerCamelCase = {} lowerCamelCase = {} for state in states_space: lowerCamelCase = observations_space[0] lowerCamelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case__ ) ): lowerCamelCase = observations_space[o] lowerCamelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state # Update probabilities and pointers dicts lowerCamelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase = arg_max # The final observation lowerCamelCase = observations_space[len(snake_case__ ) - 1] # argmax for given final observation lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state lowerCamelCase = arg_max # Process pointers backwards lowerCamelCase = last_state lowerCamelCase = [] for o in range(len(snake_case__ ) - 1 , -1 , -1 ): result.append(snake_case__ ) lowerCamelCase = pointers[previous, observations_space[o]] result.reverse() return result def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_not_empty( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) _validate_lists(snake_case__ , snake_case__ ) _validate_dicts( snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_list(snake_case__ , """observations_space""" ) _validate_list(snake_case__ , """states_space""" ) def a__ ( snake_case__ , snake_case__ ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a list' raise ValueError(snake_case__ ) else: for x in _object: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'{var_name} must be a list of strings' raise ValueError(snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ ) _validate_nested_dict(snake_case__ , """transition_probabilities""" ) _validate_nested_dict(snake_case__ , """emission_probabilities""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_dict(_object , snake_case__ , snake_case__ ) for x in _object.values(): _validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a dict' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ): lowerCamelCase = F'{var_name} all keys must be strings' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ): lowerCamelCase = """nested dictionary """ if nested else """""" lowerCamelCase = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class __A ( UpperCAmelCase__ ): '''simple docstring''' lowerCAmelCase_ = """table-transformer""" lowerCAmelCase_ = ["""past_key_values"""] lowerCAmelCase_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=3 , __lowerCAmelCase=1_0_0 , __lowerCAmelCase=6 , __lowerCAmelCase=2_0_4_8 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=2_0_4_8 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1.0 , __lowerCAmelCase=False , __lowerCAmelCase="sine" , __lowerCAmelCase="resnet50" , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , **__lowerCAmelCase , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCamelCase__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_a , _a ): lowerCamelCase__ = backbone_config.get('''model_type''' ) lowerCamelCase__ = CONFIG_MAPPING[backbone_model_type] lowerCamelCase__ = config_class.from_dict(_a ) # set timm attributes to None lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None, None, None lowerCamelCase__ = use_timm_backbone lowerCamelCase__ = backbone_config lowerCamelCase__ = num_channels lowerCamelCase__ = num_queries lowerCamelCase__ = d_model lowerCamelCase__ = encoder_ffn_dim lowerCamelCase__ = encoder_layers lowerCamelCase__ = encoder_attention_heads lowerCamelCase__ = decoder_ffn_dim lowerCamelCase__ = decoder_layers lowerCamelCase__ = decoder_attention_heads lowerCamelCase__ = dropout lowerCamelCase__ = attention_dropout lowerCamelCase__ = activation_dropout lowerCamelCase__ = activation_function lowerCamelCase__ = init_std lowerCamelCase__ = init_xavier_std lowerCamelCase__ = encoder_layerdrop lowerCamelCase__ = decoder_layerdrop lowerCamelCase__ = encoder_layers lowerCamelCase__ = auxiliary_loss lowerCamelCase__ = position_embedding_type lowerCamelCase__ = backbone lowerCamelCase__ = use_pretrained_backbone lowerCamelCase__ = dilation # Hungarian matcher lowerCamelCase__ = class_cost lowerCamelCase__ = bbox_cost lowerCamelCase__ = giou_cost # Loss coefficients lowerCamelCase__ = mask_loss_coefficient lowerCamelCase__ = dice_loss_coefficient lowerCamelCase__ = bbox_loss_coefficient lowerCamelCase__ = giou_loss_coefficient lowerCamelCase__ = eos_coefficient super().__init__(is_encoder_decoder=_a , **_a ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.encoder_attention_heads @property def __lowerCamelCase ( self ): '''simple docstring''' return self.d_model class __A ( UpperCAmelCase__ ): '''simple docstring''' lowerCAmelCase_ = version.parse("""1.11""" ) @property def __lowerCamelCase ( self ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def __lowerCamelCase ( self ): '''simple docstring''' return 1E-5 @property def __lowerCamelCase ( self ): '''simple docstring''' return 1_2
209
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger(__name__) def a__ ( snake_case__ ) -> Dict: lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" ) if "model" in sd.keys(): lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # pop unnecessary weights lowerCamelCase = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(snake_case__ ) lowerCamelCase = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowerCamelCase = sd.pop(snake_case__ ) lowerCamelCase = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowerCamelCase = sd[key] # We split QKV in separate Q,K,V lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" ) lowerCamelCase = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowerCamelCase , lowerCamelCase , lowerCamelCase = torch.split(snake_case__ , depth // 3 , dim=0 ) lowerCamelCase = q lowerCamelCase = k lowerCamelCase = v del sd[key] return sd @torch.no_grad() def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple: lowerCamelCase = load_checkpoint(snake_case__ ) if config is not None: lowerCamelCase = OPTConfig.from_pretrained(snake_case__ ) else: lowerCamelCase = OPTConfig() lowerCamelCase = OPTModel(snake_case__ ).half().eval() model.load_state_dict(snake_case__ ) # Check results Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") lowerCAmelCase : Optional[Any] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
291
0
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class lowercase : def __init__( self , A_ , A_=3 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Dict: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self ) -> Any: """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_a , ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict: """simple docstring""" UpperCamelCase = FalconModel(config=_a ) model.to(_a ) model.eval() UpperCamelCase = model(_a , attention_mask=_a ) UpperCamelCase = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> int: """simple docstring""" UpperCamelCase = True UpperCamelCase = FalconModel(_a ) model.to(_a ) model.eval() UpperCamelCase = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) UpperCamelCase = model( _a , attention_mask=_a , encoder_hidden_states=_a , ) UpperCamelCase = model(_a , attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> List[Any]: """simple docstring""" UpperCamelCase = FalconForCausalLM(config=_a ) model.to(_a ) model.eval() UpperCamelCase = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> int: """simple docstring""" UpperCamelCase = True UpperCamelCase = True UpperCamelCase = FalconForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass UpperCamelCase = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , ) UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )['hidden_states'][0] UpperCamelCase = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )['hidden_states'][0] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowercase : Any = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) __lowercase : Dict = (FalconForCausalLM,) if is_torch_available() else () __lowercase : Optional[int] = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) __lowercase : Any = False __lowercase : str = False def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = FalconModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=_a , hidden_size=37 ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase , *UpperCamelCase = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: UpperCamelCase = alibi self.model_tester.create_and_check_model(_a , *_a ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(_a ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = FalconForSequenceClassification(_a ) model.to(_a ) model.eval() UpperCamelCase = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'single_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(_a ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = FalconForSequenceClassification(_a ) model.to(_a ) model.eval() UpperCamelCase = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = input_dict['input_ids'] UpperCamelCase = FalconForCausalLM(_a ) model.to(_a ) model.eval() UpperCamelCase = model(_a , use_cache=_a ) UpperCamelCase = input_ids.shape[0] UpperCamelCase = model._convert_to_rw_cache(result.past_key_values ) UpperCamelCase = model._convert_cache_to_standard_format(_a , _a ) for layer in range(len(_a ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'multi_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(_a ) UpperCamelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase = FalconForSequenceClassification(_a ) model.to(_a ) model.eval() UpperCamelCase = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(_a , 'use_cache' ): return UpperCamelCase = model_class(_a ).to(_a ) if "use_cache" not in inputs: UpperCamelCase = True UpperCamelCase = model(**_a ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return UpperCamelCase = ( getattr(_a , 'decoder_layers' , _a ) or getattr(_a , 'num_decoder_layers' , _a ) or config.num_hidden_layers ) UpperCamelCase = getattr(_a , 'num_kv_heads' , config.num_attention_heads ) UpperCamelCase = getattr(_a , 'd_model' , config.hidden_size ) UpperCamelCase = embed_dim // num_attention_heads UpperCamelCase = outputs['past_key_values'] self.assertEqual(len(_a ) , _a ) UpperCamelCase , UpperCamelCase = inputs['input_ids'].shape for i in range(_a ): if config.new_decoder_architecture: UpperCamelCase = config.num_attention_heads elif config.multi_query: UpperCamelCase = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class lowercase ( unittest.TestCase ): @slow def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' ) UpperCamelCase = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' ) model.eval() model.to(_a ) UpperCamelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(_a ) UpperCamelCase = ( 'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.' ) UpperCamelCase = model.generate(**_a , do_sample=_a , max_new_tokens=19 ) UpperCamelCase = tokenizer.batch_decode(_a )[0] self.assertEqual(_a , _a ) @slow def __UpperCamelCase ( self ) -> Any: """simple docstring""" # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: UpperCamelCase = AutoTokenizer.from_pretrained(_a ) UpperCamelCase = FalconForCausalLM.from_pretrained(_a ) model.eval() model.to(_a ) UpperCamelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(_a ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**_a , do_sample=_a , max_new_tokens=4 ) model.generate(**_a , do_sample=_a , max_new_tokens=4 ) model.generate(**_a , num_beams=2 , max_new_tokens=4 ) @slow def __UpperCamelCase ( self ) -> int: """simple docstring""" # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: UpperCamelCase = AutoTokenizer.from_pretrained(_a ) UpperCamelCase = FalconForCausalLM.from_pretrained(_a ) model.eval() model.to(device=_a ) UpperCamelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(_a ) # Test results are the same with and without cache UpperCamelCase = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a ) UpperCamelCase = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
222
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = tempfile.mkdtemp() # fmt: off lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCamelCase = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } lowerCamelCase = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_a , _a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_tokenizer() lowerCamelCase = self.get_image_processor() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCamelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = image_processor(_a , return_tensors="""np""" ) lowerCamelCase = processor(images=_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = processor(text=_a ) lowerCamelCase = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase = processor.batch_decode(_a ) lowerCamelCase = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
291
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : str, *_lowerCamelCase : Tuple, **_lowerCamelCase : int ): '''simple docstring''' warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''', _a, ) super().__init__(*_a, **_a )
266
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def a__ ( ) -> Union[str, Any]: lowerCamelCase = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def a__ ( ) -> List[str]: lowerCamelCase = parse_args() # Import training_script as a module. lowerCamelCase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCamelCase = script_fpath.stem lowerCamelCase = importlib.import_module(snake_case__ ) # Patch sys.argv lowerCamelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
291
0
import qiskit def A ( _UpperCAmelCase : Union[str, Any] = 2 ) -> qiskit.result.counts.Counts: '''simple docstring''' _UpperCAmelCase = qubits # Using Aer's simulator _UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' ) # Creating a Quantum Circuit acting on the q register _UpperCAmelCase = qiskit.QuantumCircuit(snake_case__ , snake_case__ ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , snake_case__ ): # Adding CX (CNOT) gate circuit.cx(i - 1 , snake_case__ ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(snake_case__ ) ) , list(range(snake_case__ ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator _UpperCAmelCase = qiskit.execute(snake_case__ , snake_case__ , shots=1_000 ) return job.result().get_counts(snake_case__ ) if __name__ == "__main__": print(f"""Total count for various states are: {quantum_entanglement(3)}""")
339
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : List[str] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "sew-d" def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a=2 , _a=512 , _a=256 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ): """simple docstring""" super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowerCamelCase = hidden_size lowerCamelCase = feat_extract_norm lowerCamelCase = feat_extract_activation lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = conv_bias lowerCamelCase = num_conv_pos_embeddings lowerCamelCase = num_conv_pos_embedding_groups lowerCamelCase = len(self.conv_dim ) lowerCamelCase = num_hidden_layers lowerCamelCase = intermediate_size lowerCamelCase = squeeze_factor lowerCamelCase = max_position_embeddings lowerCamelCase = position_buckets lowerCamelCase = share_att_key lowerCamelCase = relative_attention lowerCamelCase = norm_rel_ebd lowerCamelCase = list(_a ) lowerCamelCase = hidden_act lowerCamelCase = num_attention_heads lowerCamelCase = hidden_dropout lowerCamelCase = attention_dropout lowerCamelCase = activation_dropout lowerCamelCase = feat_proj_dropout lowerCamelCase = final_dropout lowerCamelCase = layer_norm_eps lowerCamelCase = feature_layer_norm_eps lowerCamelCase = initializer_range lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase = apply_spec_augment lowerCamelCase = mask_time_prob lowerCamelCase = mask_time_length lowerCamelCase = mask_time_min_masks lowerCamelCase = mask_feature_prob lowerCamelCase = mask_feature_length lowerCamelCase = mask_feature_min_masks # ctc loss lowerCamelCase = ctc_loss_reduction lowerCamelCase = ctc_zero_infinity # sequence classification lowerCamelCase = use_weighted_layer_sum lowerCamelCase = classifier_proj_size @property def _lowerCAmelCase ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
291
0
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): lowercase__ = f'Expected string as input, found {type(snake_case__ )}' raise ValueError(snake_case__ ) if not isinstance(snake_case__ , snake_case__ ): lowercase__ = f'Expected boolean as use_pascal parameter, found {type(snake_case__ )}' raise ValueError(snake_case__ ) lowercase__ = input_str.split('''_''' ) lowercase__ = 0 if use_pascal else 1 lowercase__ = words[start_index:] lowercase__ = [word[0].upper() + word[1:] for word in words_to_capitalize] lowercase__ = '''''' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
110
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase : Any = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _lowerCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None , _a="warn" , ): """simple docstring""" lowerCamelCase = recall_score( _a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , ) return {"recall": float(_a ) if score.size == 1 else score}
291
0
class lowerCamelCase__: def __init__( self: int , UpperCamelCase_: str , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple ): __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = graph self._normalize_graph(_a , _a ) __lowerCamelCase = len(_a ) __lowerCamelCase = None def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: Dict ): if sources is int: __lowerCamelCase = [sources] if sinks is int: __lowerCamelCase = [sinks] if len(_a ) == 0 or len(_a ) == 0: return __lowerCamelCase = sources[0] __lowerCamelCase = sinks[0] # make fake vertex if there are more # than one source or sink if len(_a ) > 1 or len(_a ) > 1: __lowerCamelCase = 0 for i in sources: max_input_flow += sum(self.graph[i] ) __lowerCamelCase = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: __lowerCamelCase = max_input_flow __lowerCamelCase = 0 __lowerCamelCase = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: __lowerCamelCase = max_input_flow __lowerCamelCase = size - 1 def lowerCAmelCase__ ( self: int ): if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[int] ): __lowerCamelCase = algorithm(self ) class lowerCamelCase__: def __init__( self: List[Any] , UpperCamelCase_: Optional[int] ): __lowerCamelCase = flow_network __lowerCamelCase = flow_network.verticesCount __lowerCamelCase = flow_network.sourceIndex __lowerCamelCase = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that __lowerCamelCase = flow_network.graph __lowerCamelCase = False def lowerCAmelCase__ ( self: str ): if not self.executed: self._algorithm() __lowerCamelCase = True def lowerCAmelCase__ ( self: List[str] ): pass class lowerCamelCase__( UpperCAmelCase__): def __init__( self: Any , UpperCamelCase_: Any ): super().__init__(_a ) # use this to save your result __lowerCamelCase = -1 def lowerCAmelCase__ ( self: Optional[int] ): if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class lowerCamelCase__( UpperCAmelCase__): def __init__( self: Union[str, Any] , UpperCamelCase_: str ): super().__init__(_a ) __lowerCamelCase = [[0] * self.verticies_count for i in range(self.verticies_count )] __lowerCamelCase = [0] * self.verticies_count __lowerCamelCase = [0] * self.verticies_count def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule __lowerCamelCase = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list __lowerCamelCase = 0 while i < len(_a ): __lowerCamelCase = vertices_list[i] __lowerCamelCase = self.heights[vertex_index] self.process_vertex(_a ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(_a ) ) __lowerCamelCase = 0 else: i += 1 __lowerCamelCase = sum(self.preflow[self.source_index] ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(_a , _a ) self.relabel(_a ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] ): __lowerCamelCase = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ): __lowerCamelCase = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): __lowerCamelCase = self.heights[to_index] if min_height is not None: __lowerCamelCase = min_height + 1 if __name__ == "__main__": UpperCAmelCase_ = [0] UpperCAmelCase_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] UpperCAmelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network UpperCAmelCase_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate UpperCAmelCase_ = flow_network.find_maximum_flow() print(f"""maximum flow is {maximum_flow}""")
12
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = process lowerCamelCase = params def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" lowerCamelCase = self.dataset[i] lowerCamelCase = self.process(_a , **self.params ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" lowerCamelCase = loader lowerCamelCase = infer lowerCamelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase = None lowerCamelCase = loader_batch_size # Internal bookkeeping lowerCamelCase = None lowerCamelCase = None def __len__( self ): """simple docstring""" return len(self.loader ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_a , _a ): # Convert ModelOutput to tuple first lowerCamelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_a , _a ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowerCamelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase = self._loader_batch_data.__class__(_a ) self._loader_batch_index += 1 return result def _lowerCAmelCase ( self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase = next(self.iterator ) lowerCamelCase = self.infer(_a , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase = processed lowerCamelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" super().__init__(_a , _a , _a ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) lowerCamelCase = None return self def _lowerCAmelCase ( self ): """simple docstring""" if self.subiterator is None: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCamelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) lowerCamelCase = next(self.subiterator ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase = False lowerCamelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator while not is_last: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size lowerCamelCase = processed lowerCamelCase = 0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator else: lowerCamelCase = processed lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) return accumulator class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = key def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return self.dataset[i][self.key] class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = keya lowerCamelCase = keya def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
291
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a : List[str] = { """configuration_mask2former""": [ """MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Mask2FormerConfig""", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a : str = ["""Mask2FormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a : Union[str, Any] = [ """MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """Mask2FormerForUniversalSegmentation""", """Mask2FormerModel""", """Mask2FormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
210
"""simple docstring""" def a__ ( snake_case__ ) -> bool: lowerCamelCase = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def a__ ( snake_case__ = 50_00 ) -> int: lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case__ )] for i, pentagonal_i in enumerate(snake_case__ ): for j in range(snake_case__ , len(snake_case__ ) ): lowerCamelCase = pentagonal_nums[j] lowerCamelCase = pentagonal_i + pentagonal_j lowerCamelCase = pentagonal_j - pentagonal_i if is_pentagonal(snake_case__ ) and is_pentagonal(snake_case__ ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
291
0
import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging a__ = logging.get_logger(__name__) class snake_case ( UpperCAmelCase__ ): '''simple docstring''' snake_case_ : List[Any] = CLIPConfig snake_case_ : Any = ["""CLIPEncoderLayer"""] def __init__( self : Any , lowerCAmelCase : int) -> str: """simple docstring""" super().__init__(_a) _snake_case : List[Any] = CLIPVisionModelWithProjection(config.vision_config) _snake_case : Dict = nn.Linear(config.vision_config.projection_dim , 1) _snake_case : Dict = nn.Linear(config.vision_config.projection_dim , 1) @torch.no_grad() def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : List[Any]=0.5 , lowerCAmelCase : List[str]=0.5) -> List[str]: """simple docstring""" _snake_case : List[str] = self.vision_model(_a)[0] _snake_case : List[str] = self.p_head(_a) _snake_case : str = nsfw_detected.flatten() _snake_case : Tuple = nsfw_detected > p_threshold _snake_case : Dict = nsfw_detected.tolist() if any(_a): logger.warning( """Potential NSFW content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""") for idx, nsfw_detected_ in enumerate(_a): if nsfw_detected_: _snake_case : int = np.zeros(images[idx].shape) _snake_case : List[Any] = self.w_head(_a) _snake_case : int = watermark_detected.flatten() _snake_case : List[str] = watermark_detected > w_threshold _snake_case : Tuple = watermark_detected.tolist() if any(_a): logger.warning( """Potential watermarked content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""") for idx, watermark_detected_ in enumerate(_a): if watermark_detected_: _snake_case : Optional[int] = np.zeros(images[idx].shape) return images, nsfw_detected, watermark_detected
317
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowerCAmelCase : Tuple = logging.get_logger(__name__) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: with open(snake_case__ , """rb""" ) as flax_state_f: lowerCamelCase = from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCamelCase = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) lowerCamelCase = """""" lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" ) lowerCamelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCamelCase = [] lowerCamelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): lowerCamelCase = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCamelCase = """.""".join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict lowerCamelCase = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor lowerCamelCase = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list lowerCamelCase = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' """ use it for predictions and inference.""" ) return pt_model
291
0
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: '''simple docstring''' A__ = filter(lambda SCREAMING_SNAKE_CASE__ : p.requires_grad , model.parameters() ) A__ = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowercase_ = logging.getLogger(__name__) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]: '''simple docstring''' if metric == "rouge2": A__ = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": A__ = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": A__ = '{val_avg_em:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) A__ = ModelCheckpoint( dirpath=snake_case__ , filename=snake_case__ , monitor=f'val_{metric}' , mode='max' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]: '''simple docstring''' return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=snake_case__ , verbose=snake_case__ , ) class A ( pl.Callback ): """simple docstring""" def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : str )-> Any: '''simple docstring''' A__ = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_a ) @rank_zero_only def snake_case__ ( self : Tuple,lowercase_ : Optional[int],lowercase_ : Tuple,lowercase_ : int,lowercase_ : Tuple=True )-> str: '''simple docstring''' logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) A__ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results A__ = Path(pl_module.hparams.output_dir ) if type_path == "test": A__ = od / 'test_results.txt' A__ = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. A__ = od / F'{type_path}_results/{trainer.global_step:05d}.txt' A__ = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=_a ) generations_file.parent.mkdir(exist_ok=_a ) with open(_a,'a+' ) as writer: for key in sorted(_a ): if key in ["log", "progress_bar", "preds"]: continue A__ = metrics[key] if isinstance(_a,torch.Tensor ): A__ = val.item() A__ = F'{key}: {val:.6f}\n' writer.write(_a ) if not save_generations: return if "preds" in metrics: A__ = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(_a ) @rank_zero_only def snake_case__ ( self : Any,lowercase_ : List[str],lowercase_ : Tuple )-> Union[str, Any]: '''simple docstring''' try: A__ = pl_module.model.model.num_parameters() except AttributeError: A__ = pl_module.model.num_parameters() A__ = count_trainable_parameters(_a ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def snake_case__ ( self : Any,lowercase_ : Optional[Any],lowercase_ : Any )-> Tuple: '''simple docstring''' save_json(pl_module.metrics,pl_module.metrics_save_path ) return self._write_logs(_a,_a,'test' ) @rank_zero_only def snake_case__ ( self : Optional[Any],lowercase_ : Dict,lowercase_ : List[Any] )-> Union[str, Any]: '''simple docstring''' save_json(pl_module.metrics,pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
7
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCAmelCase : int = None lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", }, """tokenizer_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""", }, } lowerCAmelCase : Optional[int] = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } lowerCAmelCase : Union[str, Any] = """▁""" # Segments (not really needed) lowerCAmelCase : str = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : Tuple = 2 lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : List[Any] = 4 class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = "left" __UpperCamelCase = XLNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ): """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) lowerCamelCase = 3 lowerCamelCase = do_lower_case lowerCamelCase = remove_space lowerCamelCase = keep_accents lowerCamelCase = vocab_file lowerCamelCase = False if not self.vocab_file else True def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] lowerCamelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
291
0
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __lowerCAmelCase ( nn.Module ): """simple docstring""" _snake_case : Optional[int] = 4_2 _snake_case : Dict = 4_2 _snake_case : Dict = 0.0 _snake_case : Tuple = 1 _snake_case : Tuple = 1 _snake_case : int = True _snake_case : Dict = False _snake_case : Optional[Any] = False _snake_case : List[Any] = False _snake_case : List[str] = jnp.floataa def snake_case__ ( self : Optional[int] ) -> str: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = [] for i in range(self.num_layers ): _UpperCamelCase = self.in_channels if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=_a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_a ) _UpperCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_a ) _UpperCamelCase = resnets _UpperCamelCase = attentions if self.add_downsample: _UpperCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any]=True ) -> List[Any]: '''simple docstring''' _UpperCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): _UpperCamelCase = resnet(_a , _a , deterministic=_a ) _UpperCamelCase = attn(_a , _a , deterministic=_a ) output_states += (hidden_states,) if self.add_downsample: _UpperCamelCase = self.downsamplers_a(_a ) output_states += (hidden_states,) return hidden_states, output_states class __lowerCAmelCase ( nn.Module ): """simple docstring""" _snake_case : Tuple = 4_2 _snake_case : List[Any] = 4_2 _snake_case : Tuple = 0.0 _snake_case : Optional[int] = 1 _snake_case : List[str] = True _snake_case : str = jnp.floataa def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = [] for i in range(self.num_layers ): _UpperCamelCase = self.in_channels if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=_a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_a ) _UpperCamelCase = resnets if self.add_downsample: _UpperCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int=True ) -> List[str]: '''simple docstring''' _UpperCamelCase = () for resnet in self.resnets: _UpperCamelCase = resnet(_a , _a , deterministic=_a ) output_states += (hidden_states,) if self.add_downsample: _UpperCamelCase = self.downsamplers_a(_a ) output_states += (hidden_states,) return hidden_states, output_states class __lowerCAmelCase ( nn.Module ): """simple docstring""" _snake_case : Dict = 4_2 _snake_case : Union[str, Any] = 4_2 _snake_case : List[str] = 4_2 _snake_case : List[Any] = 0.0 _snake_case : int = 1 _snake_case : Tuple = 1 _snake_case : List[str] = True _snake_case : Dict = False _snake_case : Optional[Any] = False _snake_case : int = False _snake_case : int = jnp.floataa def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = [] for i in range(self.num_layers ): _UpperCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _UpperCamelCase = self.prev_output_channel if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_a ) _UpperCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_a ) _UpperCamelCase = resnets _UpperCamelCase = attentions if self.add_upsample: _UpperCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str=True ) -> Tuple: '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _UpperCamelCase = res_hidden_states_tuple[-1] _UpperCamelCase = res_hidden_states_tuple[:-1] _UpperCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _UpperCamelCase = resnet(_a , _a , deterministic=_a ) _UpperCamelCase = attn(_a , _a , deterministic=_a ) if self.add_upsample: _UpperCamelCase = self.upsamplers_a(_a ) return hidden_states class __lowerCAmelCase ( nn.Module ): """simple docstring""" _snake_case : Tuple = 4_2 _snake_case : str = 4_2 _snake_case : Dict = 4_2 _snake_case : Optional[Any] = 0.0 _snake_case : Any = 1 _snake_case : Tuple = True _snake_case : Any = jnp.floataa def snake_case__ ( self : Tuple ) -> str: '''simple docstring''' _UpperCamelCase = [] for i in range(self.num_layers ): _UpperCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _UpperCamelCase = self.prev_output_channel if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_a ) _UpperCamelCase = resnets if self.add_upsample: _UpperCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str=True ) -> str: '''simple docstring''' for resnet in self.resnets: # pop res hidden states _UpperCamelCase = res_hidden_states_tuple[-1] _UpperCamelCase = res_hidden_states_tuple[:-1] _UpperCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _UpperCamelCase = resnet(_a , _a , deterministic=_a ) if self.add_upsample: _UpperCamelCase = self.upsamplers_a(_a ) return hidden_states class __lowerCAmelCase ( nn.Module ): """simple docstring""" _snake_case : Dict = 4_2 _snake_case : Union[str, Any] = 0.0 _snake_case : Any = 1 _snake_case : List[Any] = 1 _snake_case : Any = False _snake_case : Tuple = False _snake_case : Union[str, Any] = jnp.floataa def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _UpperCamelCase = [] for _ in range(self.num_layers ): _UpperCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_a ) _UpperCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_a ) _UpperCamelCase = resnets _UpperCamelCase = attentions def __call__( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict=True ) -> int: '''simple docstring''' _UpperCamelCase = self.resnets[0](_a , _a ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _UpperCamelCase = attn(_a , _a , deterministic=_a ) _UpperCamelCase = resnet(_a , _a , deterministic=_a ) return hidden_states
324
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __magic_name__ ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" lowerCamelCase = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) lowerCamelCase = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) lowerCamelCase = 3 lowerCamelCase = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) lowerCamelCase = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) lowerCamelCase = generator.model.config.eos_token_id lowerCamelCase = """<pad>""" lowerCamelCase = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility lowerCamelCase = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
291
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase_ ( UpperCAmelCase__ ): __lowerCamelCase : Optional[int] = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , **_lowerCAmelCase , ) -> Optional[int]: super().__init__(**_a ) _lowerCAmelCase = size if size is not None else {"shortest_edge": 224} _lowerCAmelCase = get_size_dict(_a , default_to_square=_a ) _lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase = get_size_dict(_a , default_to_square=_a , param_name="crop_size" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD _lowerCAmelCase = do_convert_rgb def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Union[str, Any]: _lowerCAmelCase = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) _lowerCAmelCase = get_resize_output_image_size(_a , size=size["shortest_edge"] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Dict: _lowerCAmelCase = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(_a , size=(size["height"], size["width"]) , data_format=_a , **_a ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> str: return rescale(_a , scale=_a , data_format=_a , **_a ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Dict: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> Any: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_a , param_name="size" , default_to_square=_a ) _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_a , param_name="crop_size" , default_to_square=_a ) _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _lowerCAmelCase = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: _lowerCAmelCase = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_a ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_a , _a ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_a , tensor_type=_a )
158
"""simple docstring""" def a__ ( snake_case__ , snake_case__ = False ) -> str: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected string as input, found {type(snake_case__ )}' raise ValueError(snake_case__ ) if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'Expected boolean as use_pascal parameter, found {type(snake_case__ )}' raise ValueError(snake_case__ ) lowerCamelCase = input_str.split("""_""" ) lowerCamelCase = 0 if use_pascal else 1 lowerCamelCase = words[start_index:] lowerCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize] lowerCamelCase = """""" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
from __future__ import annotations def lowerCAmelCase__(__snake_case ) -> bool: '''simple docstring''' lowerCamelCase__ = len(snake_case__ ) # We need to create solution object to save path. lowerCamelCase__ = [[0 for _ in range(snake_case__ )] for _ in range(snake_case__ )] lowerCamelCase__ = run_maze(snake_case__ ,0 ,0 ,snake_case__ ) if solved: print('''\n'''.join(str(snake_case__ ) for row in solutions ) ) else: print('''No solution exists!''' ) return solved def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> bool: '''simple docstring''' lowerCamelCase__ = len(snake_case__ ) # Final check point. if i == j == (size - 1): lowerCamelCase__ = 1 return True lowerCamelCase__ = (not i < 0) and (not j < 0) # Check lower bounds lowerCamelCase__ = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. lowerCamelCase__ = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited lowerCamelCase__ = 1 # check for directions if ( run_maze(snake_case__ ,i + 1 ,snake_case__ ,snake_case__ ) or run_maze(snake_case__ ,snake_case__ ,j + 1 ,snake_case__ ) or run_maze(snake_case__ ,i - 1 ,snake_case__ ,snake_case__ ) or run_maze(snake_case__ ,snake_case__ ,j - 1 ,snake_case__ ) ): return True lowerCamelCase__ = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
209
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase : int = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ): """simple docstring""" super().__init__(**_a ) lowerCamelCase = size if size is not None else {"""shortest_edge""": 256} lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = resample lowerCamelCase = do_center_crop lowerCamelCase = crop_size lowerCamelCase = do_rescale lowerCamelCase = rescale_factor lowerCamelCase = do_normalize lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) lowerCamelCase = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a ): """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a , _a = None , **_a , ): """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ): """simple docstring""" lowerCamelCase = do_resize if do_resize is not None else self.do_resize lowerCamelCase = size if size is not None else self.size lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = resample if resample is not None else self.resample lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase = crop_size if crop_size is not None else self.crop_size lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase = image_mean if image_mean is not None else self.image_mean lowerCamelCase = image_std if image_std is not None else self.image_std lowerCamelCase = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase = [to_numpy_array(_a ) for image in images] if do_resize: lowerCamelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: lowerCamelCase = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: lowerCamelCase = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCamelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCamelCase = [to_channel_dimension_format(_a , _a ) for image in images] lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a ) def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_a ): lowerCamelCase = target_sizes.numpy() lowerCamelCase = [] for idx in range(len(_a ) ): lowerCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a ) lowerCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: lowerCamelCase = logits.argmax(dim=1 ) lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
291
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Optional[int] = """▁""" _UpperCAmelCase : int = {"""vocab_file""": """sentencepiece.bpe.model"""} _UpperCAmelCase : int = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } _UpperCAmelCase : Tuple = { """facebook/xglm-564M""": 2_048, } class lowercase ( UpperCAmelCase__ ): __lowercase : str = VOCAB_FILES_NAMES __lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_ = None , **A_ , ) -> int: """simple docstring""" UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer UpperCamelCase = 7 UpperCamelCase = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] UpperCamelCase = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_a ) ) UpperCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase = 1 # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} UpperCamelCase = len(self.sp_model ) UpperCamelCase = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_a ) UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.__dict__.copy() UpperCamelCase = None UpperCamelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , A_ ) -> int: """simple docstring""" UpperCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): UpperCamelCase = {} UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __UpperCamelCase ( self , A_ , A_ = None ) -> Dict: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a UpperCamelCase = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> Optional[Any]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) def __UpperCamelCase ( self , A_ , A_ = None ) -> Optional[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" return self.sp_model.encode(_a , out_type=_a ) def __UpperCamelCase ( self , A_ ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase = self.sp_model.PieceToId(_a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = ''.join(_a ).replace(_a , ' ' ).strip() return out_string def __UpperCamelCase ( self , A_ , A_ = None ) -> List[Any]: """simple docstring""" if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( _a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , 'wb' ) as fi: UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,)
222
"""simple docstring""" import operator as op lowerCAmelCase : Dict = """scaler.pt""" lowerCAmelCase : Tuple = """pytorch_model""" lowerCAmelCase : Union[str, Any] = """random_states""" lowerCAmelCase : Union[str, Any] = """optimizer""" lowerCAmelCase : Dict = """scheduler""" lowerCAmelCase : int = """pytorch_model.bin""" lowerCAmelCase : str = """pytorch_model.bin.index.json""" lowerCAmelCase : Union[str, Any] = """model.safetensors""" lowerCAmelCase : List[Any] = """model.safetensors.index.json""" lowerCAmelCase : List[Any] = """1.10.2""" lowerCAmelCase : Any = """py38""" lowerCAmelCase : Optional[int] = """4.17.0""" lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""] lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""] lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""] lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""] lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""] lowerCAmelCase : Any = """2.0.1""" lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""] lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""] lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowerCAmelCase : Union[str, Any] = [ """nnodes""", """nproc_per_node""", """rdzv_backend""", """rdzv_endpoint""", """rdzv_id""", """rdzv_conf""", """standalone""", """max_restarts""", """monitor_interval""", """start_method""", """role""", """module""", """m""", """no_python""", """run_path""", """log_dir""", """r""", """redirects""", """t""", """tee""", """node_rank""", """master_addr""", """master_port""", ] lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""] lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
291
0
"""simple docstring""" import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput lowercase_ = """scheduler_config.json""" class snake_case ( UpperCAmelCase__ ): '''simple docstring''' A_ : List[str] = 1 A_ : List[Any] = 2 A_ : Union[str, Any] = 3 A_ : Optional[int] = 4 A_ : Optional[Any] = 5 @dataclass class snake_case ( UpperCAmelCase__ ): '''simple docstring''' A_ : Tuple = 42 class snake_case : '''simple docstring''' A_ : Any = SCHEDULER_CONFIG_NAME A_ : List[Any] = ["dtype"] A_ : Optional[int] = [] A_ : Tuple = True @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any], _lowerCamelCase : Optional[int] = None, _lowerCamelCase : int = None, _lowerCamelCase : Optional[int]=False, **_lowerCamelCase : Union[str, Any], ): '''simple docstring''' __A , __A = cls.load_config( pretrained_model_name_or_path=_a, subfolder=_a, return_unused_kwargs=_a, **_a, ) __A , __A = cls.from_config(_a, return_unused_kwargs=_a, **_a ) if hasattr(_a, '''create_state''' ) and getattr(_a, '''has_state''', _a ): __A = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Any, _lowerCamelCase : int = False, **_lowerCamelCase : List[Any] ): '''simple docstring''' self.save_config(save_directory=_a, push_to_hub=_a, **_a ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' return self._get_compatibles() @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ): '''simple docstring''' __A = list(set([cls.__name__] + cls._compatibles ) ) __A = importlib.import_module(__name__.split('''.''' )[0] ) __A = [ getattr(_a, _a ) for c in compatible_classes_str if hasattr(_a, _a ) ] return compatible_classes def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" assert len(snake_case__ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(snake_case__ ) - x.ndim) ) , snake_case__ ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=0.999 , __UpperCamelCase=jnp.floataa ): """simple docstring""" def alpha_bar(__UpperCamelCase ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 __A = [] for i in range(snake_case__ ): __A = i / num_diffusion_timesteps __A = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(snake_case__ ) / alpha_bar(snake_case__ ) , snake_case__ ) ) return jnp.array(snake_case__ , dtype=snake_case__ ) @flax.struct.dataclass class snake_case : '''simple docstring''' A_ : Any = 42 A_ : Any = 42 A_ : Tuple = 42 @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int], _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = scheduler.config if config.trained_betas is not None: __A = jnp.asarray(config.trained_betas, dtype=scheduler.dtype ) elif config.beta_schedule == "linear": __A = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __A = ( jnp.linspace( config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __A = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype ) else: raise NotImplementedError( f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' ) __A = 1.0 - betas __A = jnp.cumprod(_a, axis=0 ) return cls( alphas=_a, betas=_a, alphas_cumprod=_a, ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = state.alphas_cumprod __A = alphas_cumprod[timesteps] ** 0.5 __A = sqrt_alpha_prod.flatten() __A = broadcast_to_shape_from_left(snake_case__ , original_samples.shape ) __A = (1 - alphas_cumprod[timesteps]) ** 0.5 __A = sqrt_one_minus_alpha_prod.flatten() __A = broadcast_to_shape_from_left(snake_case__ , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A , __A = get_sqrt_alpha_prod(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) __A = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A , __A = get_sqrt_alpha_prod(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) __A = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
266
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = image_size lowerCamelCase = patch_size lowerCamelCase = num_channels lowerCamelCase = is_training lowerCamelCase = use_labels lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase = (image_size // patch_size) ** 2 lowerCamelCase = num_patches + 1 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase = None if self.use_labels: lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ): """simple docstring""" return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = ViTMSNModel(config=_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = self.type_sequence_label_size lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a , labels=_a ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase = 1 lowerCamelCase = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ViTMSNModelTester(self ) lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def _lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def _lowerCAmelCase ( self ): """simple docstring""" pass def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a , nn.Linear ) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase = [*signature.parameters.keys()] lowerCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase = ViTMSNModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def a__ ( ) -> Any: lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def _lowerCAmelCase ( self ): """simple docstring""" torch.manual_seed(2 ) lowerCamelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a ) lowerCamelCase = self.default_image_processor lowerCamelCase = prepare_img() lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" ).to(_a ) # forward pass with torch.no_grad(): lowerCamelCase = model(**_a ) # verify the logits lowerCamelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCamelCase = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
291
0
from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase__ = logging.get_logger(__name__) def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> Tuple: '''simple docstring''' try: with open(snake_case__ , 'rb' ) as flax_state_f: _UpperCAmelCase = from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith('version' ): raise OSError( 'You seem to have cloned a repository without having git-lfs installed. Please' ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the' ' folder you cloned.' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> Tuple: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( 'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights _UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda _UpperCAmelCase : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) _UpperCAmelCase = jax.tree_util.tree_map( lambda _UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) _UpperCAmelCase = '' _UpperCAmelCase = flatten_dict(snake_case__ , sep='.' ) _UpperCAmelCase = pt_model.state_dict() # keep track of unexpected & missing keys _UpperCAmelCase = [] _UpperCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _UpperCAmelCase = flax_key_tuple.split('.' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] _UpperCAmelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] _UpperCAmelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): _UpperCAmelCase = ( flax_key_tuple_string.replace('_0' , '.0' ) .replace('_1' , '.1' ) .replace('_2' , '.2' ) .replace('_3' , '.3' ) .replace('_4' , '.4' ) .replace('_5' , '.5' ) .replace('_6' , '.6' ) .replace('_7' , '.7' ) .replace('_8' , '.8' ) .replace('_9' , '.9' ) ) _UpperCAmelCase = '.'.join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." ) else: # add weight to pytorch dict _UpperCAmelCase = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor _UpperCAmelCase = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list _UpperCAmelCase = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) if len(snake_case__ ) > 0: logger.warning( F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" ' use it for predictions and inference.' ) return pt_model
339
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]: lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]: if split_mlp_wi: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] lowerCamelCase = (wi_a, wi_a) else: lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple: return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def a__ ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ) -> Dict: lowerCamelCase = traverse_util.flatten_dict(variables["""target"""] ) lowerCamelCase = {"""/""".join(snake_case__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCamelCase = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , snake_case__ ) lowerCamelCase = collections.OrderedDict() # Shared embeddings. lowerCamelCase = old["""token_embedder/embedding"""] # Encoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , snake_case__ , """encoder""" ).T lowerCamelCase = old["""encoder/encoder_norm/scale"""] if not scalable_attention: lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """encoder""" ).T lowerCamelCase = tax_relpos_bias_lookup( snake_case__ , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 1 (Cross Attention). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" ) lowerCamelCase = layer_norm lowerCamelCase = k.T lowerCamelCase = o.T lowerCamelCase = q.T lowerCamelCase = v.T # Block i, layer 2 (MLP). lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ ) lowerCamelCase = layer_norm if split_mlp_wi: lowerCamelCase = wi[0].T lowerCamelCase = wi[1].T else: lowerCamelCase = wi.T lowerCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer lowerCamelCase = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T lowerCamelCase = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCamelCase = old["""decoder/logits_dense/kernel"""].T return new def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCamelCase = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCamelCase = state_dict["""shared.weight"""] return state_dict def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = checkpoints.load_tax_checkpoint(snake_case__ ) lowerCamelCase = convert_tax_to_pytorch( snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ ) lowerCamelCase = make_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ , strict=snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ) -> str: lowerCamelCase = MTaConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowerCamelCase = UMTaEncoderModel(snake_case__ ) else: lowerCamelCase = UMTaForConditionalGeneration(snake_case__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(snake_case__ ) # Verify that we can load the checkpoint. model.from_pretrained(snake_case__ ) print("""Done""" ) if __name__ == "__main__": lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) lowerCAmelCase : int = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
291
0
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor lowerCAmelCase = random.Random() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ): """simple docstring""" if rng is None: lowercase__ = global_rng lowercase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _a ( unittest.TestCase ): def __init__( self: Any , UpperCamelCase_: int , UpperCamelCase_: Dict=7 , UpperCamelCase_: List[Any]=400 , UpperCamelCase_: Dict=2_000 , UpperCamelCase_: Dict=24 , UpperCamelCase_: Union[str, Any]=24 , UpperCamelCase_: List[str]=0.0 , UpperCamelCase_: Dict=16_000 , UpperCamelCase_: int=True , UpperCamelCase_: Union[str, Any]=True , ) -> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = min_seq_length lowercase__ = max_seq_length lowercase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase__ = feature_size lowercase__ = num_mel_bins lowercase__ = padding_value lowercase__ = sampling_rate lowercase__ = return_attention_mask lowercase__ = do_normalize def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple: """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Union[str, Any]=False , UpperCamelCase_: List[Any]=False ) -> Optional[int]: """simple docstring""" def _flatten(UpperCamelCase_: List[Any] ): return list(itertools.chain(*_a ) ) if equal_length: lowercase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowercase__ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowercase__ = [np.asarray(_a ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _a ( UpperCAmelCase__ , unittest.TestCase ): _lowercase : str = SpeechaTextFeatureExtractor if is_speech_available() else None def lowerCamelCase_ ( self: int ) -> Tuple: """simple docstring""" lowercase__ = SpeechaTextFeatureExtractionTester(self ) def lowerCamelCase_ ( self: str , UpperCamelCase_: Dict ) -> Any: """simple docstring""" self.assertTrue(np.all(np.mean(_a , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_a , axis=0 ) - 1 ) < 1E-3 ) ) def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] lowercase__ = [np.asarray(_a ) for speech_input in speech_inputs] # Test feature size lowercase__ = feature_extractor(_a , padding=_a , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowercase__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features lowercase__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) ) # Test batched lowercase__ = feature_extractor(_a , return_tensors='''np''' ).input_features lowercase__ = feature_extractor(_a , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_a , _a ): self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase__ = np.asarray(_a ) lowercase__ = feature_extractor(_a , return_tensors='''np''' ).input_features lowercase__ = feature_extractor(_a , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_a , _a ): self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) ) def lowerCamelCase_ ( self: Dict ) -> Optional[int]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] lowercase__ = ['''longest''', '''max_length''', '''do_not_pad'''] lowercase__ = [None, 16, None] for max_length, padding in zip(_a , _a ): lowercase__ = feature_extractor( _a , padding=_a , max_length=_a , return_attention_mask=_a ) lowercase__ = inputs.input_features lowercase__ = inputs.attention_mask lowercase__ = [np.sum(_a ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] lowercase__ = ['''longest''', '''max_length''', '''do_not_pad'''] lowercase__ = [None, 16, None] for max_length, padding in zip(_a , _a ): lowercase__ = feature_extractor( _a , max_length=_a , padding=_a , return_tensors='''np''' , return_attention_mask=_a ) lowercase__ = inputs.input_features lowercase__ = inputs.attention_mask lowercase__ = [np.sum(_a ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCamelCase_ ( self: int ) -> Optional[int]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] lowercase__ = feature_extractor( _a , padding='''max_length''' , max_length=4 , truncation=_a , return_tensors='''np''' , return_attention_mask=_a , ) lowercase__ = inputs.input_features lowercase__ = inputs.attention_mask lowercase__ = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowerCamelCase_ ( self: Tuple ) -> str: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] lowercase__ = feature_extractor( _a , padding='''longest''' , max_length=4 , truncation=_a , return_tensors='''np''' , return_attention_mask=_a , ) lowercase__ = inputs.input_features lowercase__ = inputs.attention_mask lowercase__ = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] lowercase__ = feature_extractor( _a , padding='''longest''' , max_length=16 , truncation=_a , return_tensors='''np''' , return_attention_mask=_a , ) lowercase__ = inputs.input_features lowercase__ = inputs.attention_mask lowercase__ = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" import torch lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = np.random.rand(100 , 32 ).astype(np.floataa ) lowercase__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowercase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Dict ) -> Union[str, Any]: """simple docstring""" from datasets import load_dataset lowercase__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech lowercase__ = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self: List[Any] ) -> int: """simple docstring""" lowercase__ = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on lowercase__ = self._load_datasamples(1 ) lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = feature_extractor(_a , return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , _a , atol=1E-4 ) )
110
"""simple docstring""" from __future__ import annotations def a__ ( snake_case__ , snake_case__ ) -> bool: if len(snake_case__ ) == 0: return False lowerCamelCase = len(snake_case__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , snake_case__ ) else: return binary_search(a_list[midpoint + 1 :] , snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip() lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")] lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip()) lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """ print(F"""{target} was {not_str}found in {sequence}""")
291
0
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowerCamelCase__( unittest.TestCase): def __init__( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Any=7 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: Optional[int]=18 , UpperCamelCase_: Optional[Any]=30 , UpperCamelCase_: Any=4_00 , UpperCamelCase_: str=True , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=True , UpperCamelCase_: str=False , UpperCamelCase_: int=True , UpperCamelCase_: Any=True , UpperCamelCase_: Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_: Optional[Any]=[0.5, 0.5, 0.5] , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size if size is not None else {"""height""": 18, """width""": 20} __lowerCamelCase = do_thumbnail __lowerCamelCase = do_align_axis __lowerCamelCase = do_pad __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std def lowerCAmelCase__ ( self: Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowerCamelCase__( UpperCAmelCase__ , unittest.TestCase): UpperCAmelCase__ : Tuple = DonutImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = DonutImageProcessingTester(self ) @property def lowerCAmelCase__ ( self: Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def lowerCAmelCase__ ( self: int ): pass @is_flaky() def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __lowerCamelCase = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __lowerCamelCase = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __lowerCamelCase = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
12
"""simple docstring""" def a__ ( snake_case__ ) -> list: if len(snake_case__ ) < 2: return collection def circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) -> bool: lowerCamelCase = False if low == high: return swapped lowerCamelCase = low lowerCamelCase = high while left < right: if collection[left] > collection[right]: lowerCamelCase , lowerCamelCase = ( collection[right], collection[left], ) lowerCamelCase = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowerCamelCase , lowerCamelCase = ( collection[right + 1], collection[left], ) lowerCamelCase = True lowerCamelCase = low + int((high - low) / 2 ) lowerCamelCase = circle_sort_util(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase = circle_sort_util(snake_case__ , mid + 1 , snake_case__ ) return swapped or left_swap or right_swap lowerCamelCase = True while is_not_sorted is True: lowerCamelCase = circle_sort_util(snake_case__ , 0 , len(snake_case__ ) - 1 ) return collection if __name__ == "__main__": lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(""",""")] print(circle_sort(unsorted))
291
0
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCamelCase : """simple docstring""" __a : Optional[Any] = PegasusConfig __a : Optional[int] = {} __a : str = '''gelu''' def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=40 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , ) -> Tuple: '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = eos_token_id __lowercase = pad_token_id __lowercase = bos_token_id def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowercase = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowercase = prepare_pegasus_inputs_dict(_a , _a , _a ) return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: '''simple docstring''' __lowercase = TFPegasusModel(config=_a ).get_decoder() __lowercase = inputs_dict['''input_ids'''] __lowercase = input_ids[:1, :] __lowercase = inputs_dict['''attention_mask'''][:1, :] __lowercase = inputs_dict['''head_mask'''] __lowercase = 1 # first forward pass __lowercase = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a ) __lowercase , __lowercase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowercase = tf.concat([input_ids, next_tokens] , axis=-1 ) __lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowercase = model(_a , attention_mask=_a )[0] __lowercase = model(_a , attention_mask=_a , past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowercase = output_from_no_past[:, -3:, random_slice_idx] __lowercase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a , _a , rtol=1E-3 ) def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ): """simple docstring""" if attention_mask is None: __lowercase = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __lowercase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCamelCase ( UpperCAmelCase__ ,UpperCAmelCase__ ,unittest.TestCase ): """simple docstring""" __a : Optional[int] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __a : Union[str, Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __a : Dict = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __a : str = True __a : Tuple = False __a : int = False def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase = TFPegasusModelTester(self ) __lowercase = ConfigTester(self , config_class=_a ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" __a : str = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ''', ] __a : List[Any] = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers __a : Optional[Any] = '''google/pegasus-xsum''' @cached_property def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> int: '''simple docstring''' __lowercase = self.translate_src_text(**_a ) assert self.expected_text == generated_words def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> str: '''simple docstring''' __lowercase = self.tokenizer(self.src_text , **_a , padding=_a , return_tensors='''tf''' ) __lowercase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_a , ) __lowercase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a ) return generated_words @slow def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' self._assert_generated_batch_equal_expected()
210
"""simple docstring""" from collections.abc import Generator def a__ ( ) -> Generator[int, None, None]: lowerCamelCase , lowerCamelCase = 0, 1 while True: lowerCamelCase , lowerCamelCase = b, a + b yield b def a__ ( snake_case__ = 10_00 ) -> int: lowerCamelCase = 1 lowerCamelCase = fibonacci_generator() while len(str(next(snake_case__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
291
0
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() a__ = logging.get_logger(__name__) a__ = """The Nymphenburg Palace is a beautiful palace in Munich!""" def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]: _snake_case : Dict = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1_024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1_024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } _snake_case : Optional[int] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py _snake_case : List[str] = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later _snake_case : Tuple = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab _snake_case : Union[str, Any] = os.path.join(get_home_dir() , """models""" ) _snake_case : Optional[int] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) _snake_case : int = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) _snake_case : Dict = original_bort._collect_params_with_prefix() # Build our config 🤗 _snake_case : str = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.0_2, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(snake_case__ ), } _snake_case : Union[str, Any] = BertConfig.from_dict(snake_case__ ) _snake_case : List[Any] = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(SCREAMING_SNAKE_CASE__ : int ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): _snake_case : List[Any] = hf_param.shape _snake_case : Any = to_torch(params[gluon_param] ) _snake_case : List[Any] = gluon_param.shape assert ( shape_hf == shape_gluon ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param _snake_case : Optional[Any] = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) _snake_case : Dict = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) _snake_case : Optional[Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) _snake_case : List[Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) _snake_case : Optional[Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): _snake_case : str = hf_bort_model.bert.encoder.layer[i] # self attention _snake_case : List[Any] = layer.attention.self _snake_case : Tuple = check_and_map_params( self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) _snake_case : str = check_and_map_params( self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) _snake_case : Optional[int] = check_and_map_params( self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) _snake_case : Optional[Any] = check_and_map_params( self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) _snake_case : List[str] = check_and_map_params( self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) _snake_case : List[Any] = check_and_map_params( self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output _snake_case : Optional[Any] = layer.attention.output _snake_case : List[Any] = check_and_map_params( self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' ) _snake_case : str = check_and_map_params( self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' ) _snake_case : int = check_and_map_params( self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' ) _snake_case : Tuple = check_and_map_params( self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate _snake_case : str = layer.intermediate _snake_case : List[Any] = check_and_map_params( intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) _snake_case : List[str] = check_and_map_params( intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output _snake_case : Any = layer.output _snake_case : Dict = check_and_map_params( bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) _snake_case : Any = check_and_map_params( bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) _snake_case : List[str] = check_and_map_params( bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) _snake_case : List[Any] = check_and_map_params( bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models _snake_case : str = RobertaTokenizer.from_pretrained("""roberta-base""" ) _snake_case : Optional[int] = tokenizer.encode_plus(snake_case__ )["""input_ids"""] # Get gluon output _snake_case : Tuple = mx.nd.array([input_ids] ) _snake_case : Optional[Any] = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) _snake_case : List[Any] = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() _snake_case : str = tokenizer.encode_plus(snake_case__ , return_tensors="""pt""" ) _snake_case : Tuple = hf_bort_model(**snake_case__ )[0] _snake_case : Any = output_gluon[0].asnumpy() _snake_case : Tuple = output_hf[0].detach().numpy() _snake_case : int = np.max(np.abs(hf_layer - gluon_layer ) ).item() _snake_case : List[Any] = np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , snake_case__ ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a__ = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
317
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase : List[str] = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ): """simple docstring""" super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) lowerCamelCase = spectrogram_length lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = feature_size // self.patch_size[1] lowerCamelCase = n_fft lowerCamelCase = sampling_rate // hop_length_to_sampling_rate lowerCamelCase = sampling_rate lowerCamelCase = padding_value lowerCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) lowerCamelCase = log_spec[:, :-1] lowerCamelCase = log_spec - 20.0 lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) lowerCamelCase = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCamelCase = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase = np.array(_a ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase = padded_audio_features * self.padding_value for i in range(len(_a ) ): lowerCamelCase = audio_features[i] lowerCamelCase = feature # return as BatchFeature if return_attention_mask: lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase = {"""audio_values""": padded_audio_features} lowerCamelCase = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
291
0
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> bool: '''simple docstring''' return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
7
"""simple docstring""" from math import ceil def a__ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCamelCase = list(range(0 , snake_case__ ) ) lowerCamelCase = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowerCamelCase = [] for i in device_map_blocks: if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(snake_case__ ) # Missing blocks lowerCamelCase = [i for i in blocks if i not in device_map_blocks] lowerCamelCase = [i for i in device_map_blocks if i not in blocks] if len(snake_case__ ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(snake_case__ ) ) def a__ ( snake_case__ , snake_case__ ) -> List[Any]: lowerCamelCase = list(range(snake_case__ ) ) lowerCamelCase = int(ceil(n_layers / len(snake_case__ ) ) ) lowerCamelCase = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )] return dict(zip(snake_case__ , snake_case__ ) )
291
0
'''simple docstring''' import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Tuple ) -> str: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Linear(3 , 4 ) _UpperCamelCase = nn.BatchNormad(4 ) _UpperCamelCase = nn.Linear(4 , 5 ) def snake_case__ ( self : Any , lowerCAmelCase__ : List[Any] ) -> Optional[Any]: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(_a ) ) ) class __lowerCAmelCase ( UpperCAmelCase__ ): """simple docstring""" def snake_case__ ( self : int , lowerCAmelCase__ : Union[str, Any] , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : int ) -> Dict: '''simple docstring''' return (args[0] + 1,) + args[1:], kwargs class __lowerCAmelCase ( UpperCAmelCase__ ): """simple docstring""" def snake_case__ ( self : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Optional[int]: '''simple docstring''' return output + 1 class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = ModelForTest() _UpperCamelCase = ModelHook() add_hook_to_module(_a , _a ) self.assertEqual(test_model._hf_hook , _a ) self.assertTrue(hasattr(_a , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(_a ) self.assertFalse(hasattr(_a , '''_hf_hook''' ) ) self.assertFalse(hasattr(_a , '''_old_forward''' ) ) def snake_case__ ( self : str ) -> Dict: '''simple docstring''' _UpperCamelCase = ModelForTest() _UpperCamelCase = ModelHook() add_hook_to_module(_a , _a ) add_hook_to_module(_a , _a , append=_a ) self.assertEqual(isinstance(test_model._hf_hook , _a ) , _a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(_a , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(_a ) self.assertFalse(hasattr(_a , '''_hf_hook''' ) ) self.assertFalse(hasattr(_a , '''_old_forward''' ) ) def snake_case__ ( self : List[Any] ) -> Tuple: '''simple docstring''' _UpperCamelCase = ModelForTest() _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = test_model(x + 1 ) _UpperCamelCase = test_model(x + 2 ) _UpperCamelCase = PreForwardHook() add_hook_to_module(_a , _a ) _UpperCamelCase = test_model(_a ) self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _UpperCamelCase = PreForwardHook() add_hook_to_module(_a , _a ) _UpperCamelCase = test_model(_a ) self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks _UpperCamelCase = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(_a , _a ) _UpperCamelCase = test_model(_a ) assert torch.allclose(_a , _a , atol=1e-5 ) def snake_case__ ( self : str ) -> int: '''simple docstring''' _UpperCamelCase = ModelForTest() _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = test_model(_a ) _UpperCamelCase = PostForwardHook() add_hook_to_module(_a , _a ) _UpperCamelCase = test_model(_a ) self.assertTrue(torch.allclose(_a , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _UpperCamelCase = PostForwardHook() add_hook_to_module(_a , _a ) _UpperCamelCase = test_model(_a ) self.assertTrue(torch.allclose(_a , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks _UpperCamelCase = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(_a , _a ) _UpperCamelCase = test_model(_a ) assert torch.allclose(_a , output + 2 , atol=1e-5 ) def snake_case__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ModelForTest() _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = test_model(_a ) _UpperCamelCase = PostForwardHook() add_hook_to_module(_a , _a ) _UpperCamelCase = test_model(_a ) self.assertTrue(torch.allclose(_a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _UpperCamelCase = True _UpperCamelCase = test_model(_a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def snake_case__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(_a , AlignDevicesHook(io_same_device=_a ) ) _UpperCamelCase = torch.randn(2 , 3 ).to(0 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , torch.device(0 ) ) def snake_case__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices _UpperCamelCase = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True} add_hook_to_module(model.lineara , AlignDevicesHook(**_a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**_a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**_a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device _UpperCamelCase = torch.device(hook_kwargs['''execution_device'''] ) self.assertEqual(model.batchnorm.running_mean.device , _a ) _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , _a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload _UpperCamelCase = { '''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True, '''offload_buffers''': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**_a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**_a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**_a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , _a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices _UpperCamelCase = 0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook(_a , execution_device=_a , offload=_a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device _UpperCamelCase = torch.device(_a ) self.assertEqual(model.batchnorm.running_mean.device , _a ) _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , _a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_a ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook(_a , execution_device=_a , offload=_a , offload_buffers=_a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , _a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_a ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def snake_case__ ( self : Optional[int] ) -> str: '''simple docstring''' _UpperCamelCase = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices _UpperCamelCase = 0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook( _a , execution_device=_a , offload=_a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device _UpperCamelCase = torch.device(_a ) self.assertEqual(model.batchnorm.running_mean.device , _a ) _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , _a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_a ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook( _a , execution_device=_a , offload=_a , weights_map=model.state_dict() , offload_buffers=_a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) _UpperCamelCase = torch.randn(2 , 3 ) _UpperCamelCase = model(_a ) self.assertEqual(output.device , _a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_a ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
324
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = seq_length lowerCamelCase = is_training lowerCamelCase = use_attention_mask lowerCamelCase = use_token_type_ids lowerCamelCase = use_labels lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = type_vocab_size lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = num_choices def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase = None if self.use_attention_mask: lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase = None if self.use_token_type_ids: lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerModelTester(self ) @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a ) lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a ) @require_flax class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase = model(_a )[0] lowerCamelCase = 50_000 lowerCamelCase = (1, 6, vocab_size) self.assertEqual(output.shape , _a ) lowerCamelCase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
291
0
'''simple docstring''' from collections.abc import Generator def __a(): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = 0, 1 while True: _lowerCAmelCase , _lowerCAmelCase = b, a + b yield b def __a(SCREAMING_SNAKE_CASE_ : int = 1000 ): '''simple docstring''' _lowerCAmelCase = 1 _lowerCAmelCase = fibonacci_generator() while len(str(next(snake_case__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
158
"""simple docstring""" from typing import Any def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list: _validation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Creates data structures and fill initial step lowerCamelCase = {} lowerCamelCase = {} for state in states_space: lowerCamelCase = observations_space[0] lowerCamelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case__ ) ): lowerCamelCase = observations_space[o] lowerCamelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state # Update probabilities and pointers dicts lowerCamelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase = arg_max # The final observation lowerCamelCase = observations_space[len(snake_case__ ) - 1] # argmax for given final observation lowerCamelCase = """""" lowerCamelCase = -1 for k_state in states_space: lowerCamelCase = probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase = probability lowerCamelCase = k_state lowerCamelCase = arg_max # Process pointers backwards lowerCamelCase = last_state lowerCamelCase = [] for o in range(len(snake_case__ ) - 1 , -1 , -1 ): result.append(snake_case__ ) lowerCamelCase = pointers[previous, observations_space[o]] result.reverse() return result def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_not_empty( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) _validate_lists(snake_case__ , snake_case__ ) _validate_dicts( snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_list(snake_case__ , """observations_space""" ) _validate_list(snake_case__ , """states_space""" ) def a__ ( snake_case__ , snake_case__ ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a list' raise ValueError(snake_case__ ) else: for x in _object: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase = F'{var_name} must be a list of strings' raise ValueError(snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , ) -> None: _validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ ) _validate_nested_dict(snake_case__ , """transition_probabilities""" ) _validate_nested_dict(snake_case__ , """emission_probabilities""" ) def a__ ( snake_case__ , snake_case__ ) -> None: _validate_dict(_object , snake_case__ , snake_case__ ) for x in _object.values(): _validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ) -> None: if not isinstance(_object , snake_case__ ): lowerCamelCase = F'{var_name} must be a dict' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ): lowerCamelCase = F'{var_name} all keys must be strings' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ): lowerCamelCase = """nested dictionary """ if nested else """""" lowerCamelCase = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
from PIL import Image def lowerCAmelCase__(__snake_case ,__snake_case ) -> Image: '''simple docstring''' lowerCamelCase__ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(__snake_case ) -> int: return int(128 + factor * (c - 128) ) return img.point(snake_case__ ) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change contrast to 170 _a = change_contrast(img, 170) cont_img.save("image_data/lena_high_contrast.png", format="png")
209
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger(__name__) def a__ ( snake_case__ ) -> Dict: lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" ) if "model" in sd.keys(): lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # pop unnecessary weights lowerCamelCase = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(snake_case__ ) lowerCamelCase = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowerCamelCase = sd.pop(snake_case__ ) lowerCamelCase = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowerCamelCase = sd[key] # We split QKV in separate Q,K,V lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" ) lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" ) lowerCamelCase = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowerCamelCase , lowerCamelCase , lowerCamelCase = torch.split(snake_case__ , depth // 3 , dim=0 ) lowerCamelCase = q lowerCamelCase = k lowerCamelCase = v del sd[key] return sd @torch.no_grad() def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple: lowerCamelCase = load_checkpoint(snake_case__ ) if config is not None: lowerCamelCase = OPTConfig.from_pretrained(snake_case__ ) else: lowerCamelCase = OPTConfig() lowerCamelCase = OPTModel(snake_case__ ).half().eval() model.load_state_dict(snake_case__ ) # Check results Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") lowerCAmelCase : Optional[Any] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
291
0
import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 _UpperCAmelCase : List[Any] = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 _UpperCAmelCase : List[str] = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class lowercase : def __init__( self ) -> Any: """simple docstring""" UpperCamelCase = WATERMARK_BITS UpperCamelCase = WatermarkEncoder() self.encoder.set_watermark('bits' , self.watermark ) def __UpperCamelCase ( self , A_ ) -> Dict: """simple docstring""" # can't encode images that are smaller than 256 if images.shape[-1] < 256: return images UpperCamelCase = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy() UpperCamelCase = [self.encoder.encode(_a , 'dwtDct' ) for image in images] UpperCamelCase = torch.from_numpy(np.array(_a ) ).permute(0 , 3 , 1 , 2 ) UpperCamelCase = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 ) return images
222
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = tempfile.mkdtemp() # fmt: off lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCamelCase = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } lowerCamelCase = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_a , _a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self , **_a ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def _lowerCAmelCase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_tokenizer() lowerCamelCase = self.get_image_processor() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCamelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = image_processor(_a , return_tensors="""np""" ) lowerCamelCase = processor(images=_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = processor(text=_a ) lowerCamelCase = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase = processor.batch_decode(_a ) lowerCamelCase = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.get_image_processor() lowerCamelCase = self.get_tokenizer() lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) lowerCamelCase = """lower newer""" lowerCamelCase = self.prepare_image_inputs() lowerCamelCase = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
291
0
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowercase_ = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class snake_case ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Dict, **_lowerCamelCase : int ): '''simple docstring''' super().__init__(**_a ) requires_backends(self, '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : int, _lowerCamelCase : Dict, **_lowerCamelCase : str ): '''simple docstring''' return super().__call__(_a, **_a ) def _SCREAMING_SNAKE_CASE ( self : List[Any], **_lowerCamelCase : Tuple ): '''simple docstring''' __A = {} if "candidate_labels" in kwargs: __A = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __A = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : int, _lowerCamelCase : Optional[int]=None, _lowerCamelCase : Optional[Any]="This is a photo of {}." ): '''simple docstring''' __A = load_image(_a ) __A = self.image_processor(images=[image], return_tensors=self.framework ) __A = candidate_labels __A = [hypothesis_template.format(_a ) for x in candidate_labels] __A = self.tokenizer(_a, return_tensors=self.framework, padding=_a ) __A = [text_inputs] return inputs def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Tuple ): '''simple docstring''' __A = model_inputs.pop('''candidate_labels''' ) __A = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0], _a ): __A = text_inputs[0] else: # Batching case. __A = text_inputs[0][0] __A = self.model(**_a, **_a ) __A = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = model_outputs.pop('''candidate_labels''' ) __A = model_outputs['''logits'''][0] if self.framework == "pt": __A = logits.softmax(dim=-1 ).squeeze(-1 ) __A = probs.tolist() if not isinstance(_a, _a ): __A = [scores] elif self.framework == "tf": __A = stable_softmax(_a, axis=-1 ) __A = probs.numpy().tolist() else: raise ValueError(f'Unsupported framework: {self.framework}' ) __A = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(_a, _a ), key=lambda _lowerCamelCase : -x[0] ) ] return result
266
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def a__ ( ) -> Union[str, Any]: lowerCamelCase = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def a__ ( ) -> List[str]: lowerCamelCase = parse_args() # Import training_script as a module. lowerCamelCase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCamelCase = script_fpath.stem lowerCamelCase = importlib.import_module(snake_case__ ) # Patch sys.argv lowerCamelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
291
0
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase ( UpperCAmelCase__ ): def _lowerCamelCase ( self : Optional[Any]) -> Any: """simple docstring""" _UpperCAmelCase = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(_a , 'embed_dim')) self.parent.assertTrue(hasattr(_a , 'num_heads')) class __lowerCAmelCase : def __init__( self : Dict , A : str , A : Dict=13 , A : List[str]=64 , A : int=3 , A : List[Any]=[16, 48, 96] , A : int=[1, 3, 6] , A : Dict=[1, 2, 10] , A : Tuple=[7, 3, 3] , A : List[Any]=[4, 2, 2] , A : Union[str, Any]=[2, 1, 1] , A : List[str]=[2, 2, 2] , A : List[str]=[False, False, True] , A : int=[0.0, 0.0, 0.0] , A : str=0.0_2 , A : Tuple=1E-12 , A : Union[str, Any]=True , A : List[Any]=True , A : Tuple=2 , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCAmelCase = None if self.use_labels: # create a random int32 tensor of given shape _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : List[str]) -> int: """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self : Tuple , A : List[Any] , A : Dict , A : Tuple) -> Dict: """simple docstring""" _UpperCAmelCase = TFCvtModel(config=_a) _UpperCAmelCase = model(_a , training=_a) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth)): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width)) def _lowerCamelCase ( self : Optional[int] , A : str , A : str , A : Union[str, Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFCvtForImageClassification(_a) _UpperCAmelCase = model(_a , labels=_a , training=_a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCamelCase ( self : str) -> str: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __lowerCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): UpperCamelCase = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () UpperCamelCase = ( {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification} if is_tf_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = TFCvtModelTester(self) _UpperCAmelCase = TFCvtConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37) def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions') def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" pass @unittest.skip(reason='Cvt does not use inputs_embeds') def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='Cvt does not support input and output embeddings') def _lowerCamelCase ( self : str) -> Optional[int]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def _lowerCamelCase ( self : Optional[int]) -> Optional[int]: """simple docstring""" super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8') def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = tf.keras.mixed_precision.Policy('mixed_float16') tf.keras.mixed_precision.set_global_policy(_a) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32') def _lowerCamelCase ( self : List[str]) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_a) _UpperCAmelCase = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _a) def _lowerCamelCase ( self : str) -> str: """simple docstring""" def check_hidden_states_output(A : Tuple , A : Tuple , A : int): _UpperCAmelCase = model_class(_a) _UpperCAmelCase = model(**self._prepare_for_class(_a , _a)) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth) self.assertEqual(len(_a) , _a) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(_a , _a , _a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(_a , _a , _a) def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a) def _lowerCamelCase ( self : Any) -> Dict: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a) @slow def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = TFCvtModel.from_pretrained(_a) self.assertIsNotNone(_a) def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Any) -> Any: """simple docstring""" return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def _lowerCamelCase ( self : Optional[int]) -> List[str]: """simple docstring""" _UpperCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_a , return_tensors='tf') # forward pass _UpperCAmelCase = model(**_a) # verify the logits _UpperCAmelCase = tf.TensorShape((1, 10_00)) self.assertEqual(outputs.logits.shape , _a) _UpperCAmelCase = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _a , atol=1E-4))
339
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : List[str] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "sew-d" def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a=2 , _a=512 , _a=256 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ): """simple docstring""" super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowerCamelCase = hidden_size lowerCamelCase = feat_extract_norm lowerCamelCase = feat_extract_activation lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = list(_a ) lowerCamelCase = conv_bias lowerCamelCase = num_conv_pos_embeddings lowerCamelCase = num_conv_pos_embedding_groups lowerCamelCase = len(self.conv_dim ) lowerCamelCase = num_hidden_layers lowerCamelCase = intermediate_size lowerCamelCase = squeeze_factor lowerCamelCase = max_position_embeddings lowerCamelCase = position_buckets lowerCamelCase = share_att_key lowerCamelCase = relative_attention lowerCamelCase = norm_rel_ebd lowerCamelCase = list(_a ) lowerCamelCase = hidden_act lowerCamelCase = num_attention_heads lowerCamelCase = hidden_dropout lowerCamelCase = attention_dropout lowerCamelCase = activation_dropout lowerCamelCase = feat_proj_dropout lowerCamelCase = final_dropout lowerCamelCase = layer_norm_eps lowerCamelCase = feature_layer_norm_eps lowerCamelCase = initializer_range lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase = apply_spec_augment lowerCamelCase = mask_time_prob lowerCamelCase = mask_time_length lowerCamelCase = mask_time_min_masks lowerCamelCase = mask_feature_prob lowerCamelCase = mask_feature_length lowerCamelCase = mask_feature_min_masks # ctc loss lowerCamelCase = ctc_loss_reduction lowerCamelCase = ctc_zero_infinity # sequence classification lowerCamelCase = use_weighted_layer_sum lowerCamelCase = classifier_proj_size @property def _lowerCAmelCase ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
291
0
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowerCAmelCase = pytest.mark.integration lowerCAmelCase = {"""comet"""} lowerCAmelCase = importlib.util.find_spec('fairseq') is not None lowerCAmelCase = {"""code_eval"""} lowerCAmelCase = os.name == """nt""" lowerCAmelCase = {"""bertscore""", """frugalscore""", """perplexity"""} lowerCAmelCase = importlib.util.find_spec('transformers') is not None def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" @wraps(snake_case__ ) def wrapper(self , SCREAMING_SNAKE_CASE ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''\"test requires Fairseq\"''' ) else: test_case(self , snake_case__ ) return wrapper def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" @wraps(snake_case__ ) def wrapper(self , SCREAMING_SNAKE_CASE ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''\"test requires transformers\"''' ) else: test_case(self , snake_case__ ) return wrapper def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" @wraps(snake_case__ ) def wrapper(self , SCREAMING_SNAKE_CASE ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''\"test not supported on Windows\"''' ) else: test_case(self , snake_case__ ) return wrapper def _a ( ): """simple docstring""" lowercase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) @local class _a ( parameterized.TestCase ): _lowercase : int = {} _lowercase : int = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ = '''[...]''' lowercase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , _a ) ).module_path ) lowercase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=_a ) # check parameters lowercase__ = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(_a , metric_module.__name__ ): with self.use_local_metrics(): try: lowercase__ = doctest.testmod(_a , verbose=_a , raise_on_error=_a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = '''[...]''' lowercase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , _a ) ).module_path ) # run doctest with self.use_local_metrics(): lowercase__ = doctest.testmod(_a , verbose=_a , raise_on_error=_a ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] ) -> Optional[int]: """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](_a ): yield else: yield @contextmanager def lowerCamelCase_ ( self: Dict ) -> List[Any]: """simple docstring""" def load_local_metric(UpperCamelCase_: int , *UpperCamelCase_: Dict , **UpperCamelCase_: Tuple ): return load_metric(os.path.join('''metrics''' , _a ) , *_a , **_a ) with patch('''datasets.load_metric''' ) as mock_load_metric: lowercase__ = load_local_metric yield @classmethod def lowerCamelCase_ ( cls: Union[str, Any] , UpperCamelCase_: str ) -> int: """simple docstring""" def wrapper(UpperCamelCase_: Optional[Any] ): lowercase__ = contextmanager(_a ) lowercase__ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class _a ( UpperCAmelCase__ ): def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Any] ) -> Optional[int]: """simple docstring""" assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: lowercase__ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" import torch def bert_cos_score_idf(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ): return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: lowercase__ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" def load_from_checkpoint(SCREAMING_SNAKE_CASE ): class _a : def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: int ) -> Union[str, Any]: """simple docstring""" assert len(_a ) == 2 lowercase__ = [0.19, 0.92] return scores, sum(_a ) / len(_a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: lowercase__ = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: lowercase__ = load_from_checkpoint yield def _a ( ): """simple docstring""" lowercase__ = load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) lowercase__ = '''ERROR''' lowercase__ = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}' with pytest.raises(snake_case__ , match=re.escape(snake_case__ ) ): metric.compute(predictions=[] , references=[] , scheme=snake_case__ )
110
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase : Any = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _lowerCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None , _a="warn" , ): """simple docstring""" lowerCamelCase = recall_score( _a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , ) return {"recall": float(_a ) if score.size == 1 else score}
291
0
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = 10 def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [1, 2, 3, 4] __lowerCamelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_a , self.block_size , 0 ) , _a ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] __lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_a , self.block_size , 0 ) , _a ) def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] __lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_a , self.block_size , 0 ) , _a ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" __lowerCamelCase, __lowerCamelCase = process_story(_a ) self.assertEqual(_a , [] ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = """""" __lowerCamelCase, __lowerCamelCase = process_story(_a ) self.assertEqual(_a , [] ) self.assertEqual(_a , [] ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = ( """It was the year of Our Lord one thousand seven hundred and """ """seventy-five\n\nSpiritual revelations were conceded to England """ """at that favoured period, as at this.\n@highlight\n\nIt was the best of times""" ) __lowerCamelCase, __lowerCamelCase = process_story(_a ) __lowerCamelCase = [ """It was the year of Our Lord one thousand seven hundred and seventy-five.""", """Spiritual revelations were conceded to England at that favoured period, as at this.""", ] self.assertEqual(_a , _a ) __lowerCamelCase = ["""It was the best of times."""] self.assertEqual(_a , _a ) def lowerCAmelCase__ ( self: int ): __lowerCamelCase = torch.tensor([1, 2, 3, 4] ) __lowerCamelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_a , 0 ).numpy() , expected.numpy() ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) __lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_a , 23 ).numpy() , expected.numpy() ) def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) __lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_a , 1 ).numpy() , expected.numpy() ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = 1_01 __lowerCamelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] ) __lowerCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) __lowerCamelCase = compute_token_type_ids(_a , _a ) np.testing.assert_array_equal(_a , _a )
12
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = process lowerCamelCase = params def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" lowerCamelCase = self.dataset[i] lowerCamelCase = self.process(_a , **self.params ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" lowerCamelCase = loader lowerCamelCase = infer lowerCamelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase = None lowerCamelCase = loader_batch_size # Internal bookkeeping lowerCamelCase = None lowerCamelCase = None def __len__( self ): """simple docstring""" return len(self.loader ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_a , _a ): # Convert ModelOutput to tuple first lowerCamelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_a , _a ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowerCamelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase = self._loader_batch_data.__class__(_a ) self._loader_batch_index += 1 return result def _lowerCAmelCase ( self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase = next(self.iterator ) lowerCamelCase = self.infer(_a , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase = processed lowerCamelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a , _a=None ): """simple docstring""" super().__init__(_a , _a , _a ) def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) lowerCamelCase = None return self def _lowerCAmelCase ( self ): """simple docstring""" if self.subiterator is None: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCamelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) lowerCamelCase = next(self.subiterator ) return processed class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __iter__( self ): """simple docstring""" lowerCamelCase = iter(self.loader ) return self def _lowerCAmelCase ( self ): """simple docstring""" # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase = False lowerCamelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator while not is_last: lowerCamelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_a , torch.Tensor ): lowerCamelCase = processed else: lowerCamelCase = list(processed.keys() )[0] lowerCamelCase = processed[key] if isinstance(_a , _a ): lowerCamelCase = len(_a ) else: lowerCamelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase = observed_batch_size lowerCamelCase = processed lowerCamelCase = 0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase = self.loader_batch_item() lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) if is_last: return accumulator else: lowerCamelCase = processed lowerCamelCase = item.pop("""is_last""" ) accumulator.append(_a ) return accumulator class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = key def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return self.dataset[i][self.key] class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = dataset lowerCamelCase = keya lowerCamelCase = keya def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , _a ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
291
0
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a : str = logging.get_logger(__name__) __a : List[str] = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class _UpperCamelCase ( UpperCAmelCase__ ): """simple docstring""" __a : Optional[Any] = '''efficientnet''' def __init__( self , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 6_00 , lowerCAmelCase__ = 2.0 , lowerCAmelCase__ = 3.1 , lowerCAmelCase__ = 8 , lowerCAmelCase__ = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase__ = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCAmelCase__ = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCAmelCase__ = [] , lowerCAmelCase__ = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase__ = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase__ = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase__ = 0.25 , lowerCAmelCase__ = "swish" , lowerCAmelCase__ = 25_60 , lowerCAmelCase__ = "mean" , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 0.001 , lowerCAmelCase__ = 0.99 , lowerCAmelCase__ = 0.5 , lowerCAmelCase__ = 0.2 , **lowerCAmelCase__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(**_a ) __lowercase = num_channels __lowercase = image_size __lowercase = width_coefficient __lowercase = depth_coefficient __lowercase = depth_divisor __lowercase = kernel_sizes __lowercase = in_channels __lowercase = out_channels __lowercase = depthwise_padding __lowercase = strides __lowercase = num_block_repeats __lowercase = expand_ratios __lowercase = squeeze_expansion_ratio __lowercase = hidden_act __lowercase = hidden_dim __lowercase = pooling_type __lowercase = initializer_range __lowercase = batch_norm_eps __lowercase = batch_norm_momentum __lowercase = dropout_rate __lowercase = drop_connect_rate __lowercase = sum(_a ) * 4 class _UpperCamelCase ( UpperCAmelCase__ ): """simple docstring""" __a : List[Any] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' return 1E-5
210
"""simple docstring""" def a__ ( snake_case__ ) -> bool: lowerCamelCase = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def a__ ( snake_case__ = 50_00 ) -> int: lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case__ )] for i, pentagonal_i in enumerate(snake_case__ ): for j in range(snake_case__ , len(snake_case__ ) ): lowerCamelCase = pentagonal_nums[j] lowerCamelCase = pentagonal_i + pentagonal_j lowerCamelCase = pentagonal_j - pentagonal_i if is_pentagonal(snake_case__ ) and is_pentagonal(snake_case__ ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
291
0
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING a__ = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class snake_case ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[Any]: """simple docstring""" super().__init__(*_a , **_a) requires_backends(self , """vision""") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING) def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]=None) -> Any: """simple docstring""" _snake_case : Dict = {} if top_k is not None: _snake_case : Optional[int] = top_k return {}, {}, postprocess_params def __call__( self : int , lowerCAmelCase : Dict , **lowerCAmelCase : str) -> List[Any]: """simple docstring""" return super().__call__(_a , **_a) def UpperCamelCase_ ( self : int , lowerCAmelCase : List[str]) -> int: """simple docstring""" _snake_case : str = load_image(_a) _snake_case : int = self.image_processor(images=_a , return_tensors=self.framework) return model_inputs def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : str) -> List[Any]: """simple docstring""" _snake_case : List[str] = self.model(**_a) return model_outputs def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]=5) -> Union[str, Any]: """simple docstring""" if top_k > self.model.config.num_labels: _snake_case : Optional[Any] = self.model.config.num_labels if self.framework == "pt": _snake_case : List[str] = model_outputs.logits.softmax(-1)[0] _snake_case , _snake_case : Dict = probs.topk(_a) elif self.framework == "tf": _snake_case : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0] _snake_case : Any = tf.math.top_k(_a , k=_a) _snake_case , _snake_case : Dict = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F'''Unsupported framework: {self.framework}''') _snake_case : Optional[int] = scores.tolist() _snake_case : Any = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_a , _a)]
317
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowerCAmelCase : Tuple = logging.get_logger(__name__) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: with open(snake_case__ , """rb""" ) as flax_state_f: lowerCamelCase = from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def a__ ( snake_case__ , snake_case__ ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCamelCase = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) lowerCamelCase = """""" lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" ) lowerCamelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCamelCase = [] lowerCamelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] lowerCamelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): lowerCamelCase = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCamelCase = """.""".join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict lowerCamelCase = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor lowerCamelCase = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list lowerCamelCase = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' """ use it for predictions and inference.""" ) return pt_model
291
0