Dataset schema (each record below follows this column order):

    code                     string   (lengths 81 – 54k)
    code_codestyle           int64    (0 – 721)
    style_context            string   (lengths 91 – 41.9k)
    style_context_codestyle  int64    (0 – 699)
    label                    int64    (0 – 1)
"""simple docstring""" from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=__snake_case ): """simple docstring""" a = ["flax", "transformers"] def __init__( self : Tuple , *_A : int , **_A : Dict): """simple docstring""" requires_backends(self , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : Optional[int] , *_A : Optional[int] , **_A : List[str]): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : int , *_A : Union[str, Any] , **_A : Any): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""]) class _snake_case ( metaclass=__snake_case ): """simple docstring""" a = ["flax", "transformers"] def __init__( self : str , *_A : str , **_A : Optional[Any]): """simple docstring""" requires_backends(self , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : Union[str, Any] , *_A : Any , **_A : List[str]): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : Tuple , *_A : str , **_A : List[Any]): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""]) class _snake_case ( metaclass=__snake_case ): """simple docstring""" a = ["flax", "transformers"] def __init__( self : List[Any] , *_A : Tuple , **_A : Union[str, Any]): """simple docstring""" requires_backends(self , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : Optional[Any] , *_A : int , **_A : str): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : Dict , *_A : List[str] , **_A : Dict): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""]) class _snake_case ( metaclass=__snake_case ): """simple docstring""" a = ["flax", "transformers"] def __init__( self : List[str] , *_A : int , **_A : int): """simple docstring""" requires_backends(self , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : Tuple , *_A : Optional[int] , **_A : Dict): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""]) @classmethod def _lowerCAmelCase ( cls : Union[str, Any] , *_A : int , **_A : Dict): """simple docstring""" requires_backends(cls , ["""flax""", """transformers"""])
code_codestyle: 635
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
style_context_codestyle: 635
label: 1
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 50 )-> int: _SCREAMING_SNAKE_CASE : List[Any] = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"{solution() = }")
code_codestyle: 635
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
style_context_codestyle: 635
label: 1
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class _snake_case ( __snake_case ): """simple docstring""" a = ["audio_values", "audio_mask"] def __init__( self : Optional[Any] , _A : Any=2_0_4_8 , _A : Dict=1 , _A : Union[str, Any]=[1_6, 1_6] , _A : List[str]=1_2_8 , _A : Tuple=4_4_1_0_0 , _A : Tuple=8_6 , _A : Dict=2_0_4_8 , _A : Optional[Any]=0.0 , **_A : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , **_A , ) _SCREAMING_SNAKE_CASE : Any = spectrogram_length _SCREAMING_SNAKE_CASE : int = num_channels _SCREAMING_SNAKE_CASE : Dict = patch_size _SCREAMING_SNAKE_CASE : Dict = feature_size // self.patch_size[1] _SCREAMING_SNAKE_CASE : Any = n_fft _SCREAMING_SNAKE_CASE : List[str] = sampling_rate // hop_length_to_sampling_rate _SCREAMING_SNAKE_CASE : str = sampling_rate _SCREAMING_SNAKE_CASE : Any = padding_value _SCREAMING_SNAKE_CASE : Dict = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_A , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self : Any , _A : np.array): """simple docstring""" _SCREAMING_SNAKE_CASE : str = spectrogram( _A , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) _SCREAMING_SNAKE_CASE : List[str] = log_spec[:, :-1] _SCREAMING_SNAKE_CASE : str = log_spec - 20.0 _SCREAMING_SNAKE_CASE : Dict = np.clip(log_spec / 40.0 , -2.0 , 0.0) + 1.0 return log_spec def __call__( self : Dict , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = True , _A : Optional[int] = None , _A : bool = False , _A : bool = False , **_A : Tuple , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""") else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""") _SCREAMING_SNAKE_CASE : Tuple = isinstance(_A , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""") _SCREAMING_SNAKE_CASE : Dict = is_batched_numpy or ( isinstance(_A , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: _SCREAMING_SNAKE_CASE : Dict = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray): _SCREAMING_SNAKE_CASE : Dict = np.asarray(_A , dtype=np.floataa) elif isinstance(_A , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): _SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa) # always return batch if not is_batched: _SCREAMING_SNAKE_CASE : str = [np.asarray([raw_speech]).T] # Convert audio signals to log mel spectrograms, truncate by time axis _SCREAMING_SNAKE_CASE : str = [ self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _A): _SCREAMING_SNAKE_CASE : Dict = [np.asarray(_A , dtype=np.floataa) for feature in audio_features] # Create audio attention mask _SCREAMING_SNAKE_CASE : int = max( [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch if return_attention_mask: _SCREAMING_SNAKE_CASE : Dict = [ (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0] for feature in audio_features ] _SCREAMING_SNAKE_CASE : List[str] = np.array(_A).astype(np.floataa) # convert into correct format for padding _SCREAMING_SNAKE_CASE : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _SCREAMING_SNAKE_CASE : Tuple = np.ones([len(_A), 1, max_time_len, self.feature_size]).astype(np.floataa) _SCREAMING_SNAKE_CASE : List[Any] = padded_audio_features * self.padding_value for i in range(len(_A)): _SCREAMING_SNAKE_CASE : Tuple = audio_features[i] _SCREAMING_SNAKE_CASE : int = feature # return as BatchFeature if return_attention_mask: _SCREAMING_SNAKE_CASE : Dict = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: _SCREAMING_SNAKE_CASE : Optional[int] = {"""audio_values""": padded_audio_features} _SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature(data=_A , tensor_type=_A) return encoded_inputs
code_codestyle: 635
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 635
label: 1
"""simple docstring""" import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> np.array: return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 635
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
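The ensembling step at the end of `training_function` stacks the per-fold test logits, averages them, and takes the argmax. A toy reproduction of just that tensor arithmetic:

import torch

fold_logits = [
    torch.tensor([[2.0, 0.0], [0.0, 1.0]]),
    torch.tensor([[1.0, 0.0], [1.0, 0.0]]),
    torch.tensor([[3.0, 0.0], [0.0, 2.0]]),
]
# stack -> sum over folds -> divide by fold count -> per-example argmax
preds = torch.stack(fold_logits, dim=0).sum(dim=0).div(len(fold_logits)).argmax(dim=-1)
print(preds)  # tensor([0, 1])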
style_context_codestyle: 635
label: 1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _snake_case : """simple docstring""" @staticmethod def _lowerCAmelCase ( *_A : Union[str, Any] , **_A : List[Any]): """simple docstring""" pass @is_pipeline_test @require_vision @require_timm @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" a = MODEL_FOR_OBJECT_DETECTION_MAPPING def _lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : Any , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = ObjectDetectionPipeline(model=_A , image_processor=_A) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _lowerCAmelCase ( self : List[str] , _A : List[str] , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0) self.assertGreater(len(_A) , 0) for detected_object in outputs: self.assertEqual( _A , { """score""": ANY(_A), """label""": ANY(_A), """box""": {"""xmin""": ANY(_A), """ymin""": ANY(_A), """xmax""": ANY(_A), """ymax""": ANY(_A)}, } , ) import datasets _SCREAMING_SNAKE_CASE : str = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""") _SCREAMING_SNAKE_CASE : Dict = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] _SCREAMING_SNAKE_CASE : Tuple = object_detector(_A , threshold=0.0) self.assertEqual(len(_A) , len(_A)) for outputs in batch_outputs: self.assertGreater(len(_A) , 0) for detected_object in outputs: self.assertEqual( _A , { """score""": ANY(_A), """label""": ANY(_A), """box""": {"""xmin""": ANY(_A), """ymin""": ANY(_A), """xmax""": ANY(_A), """ymax""": ANY(_A)}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""") def _lowerCAmelCase ( self : Dict): """simple docstring""" pass @require_torch def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForObjectDetection.from_pretrained(_A) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = ObjectDetectionPipeline(model=_A , feature_extractor=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0) self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ] , ) _SCREAMING_SNAKE_CASE : Any = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( 
nested_simplify(_A , decimals=4) , [ [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ], [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = """facebook/detr-resnet-50""" _SCREAMING_SNAKE_CASE : str = AutoModelForObjectDetection.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=_A , feature_extractor=_A) _SCREAMING_SNAKE_CASE : List[Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""") self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) _SCREAMING_SNAKE_CASE : Dict = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ]) self.assertEqual( nested_simplify(_A , decimals=4) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : int): """simple docstring""" 
_SCREAMING_SNAKE_CASE : Optional[Any] = """facebook/detr-resnet-50""" _SCREAMING_SNAKE_CASE : Dict = pipeline("""object-detection""" , model=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""") self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) _SCREAMING_SNAKE_CASE : Optional[int] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ]) self.assertEqual( nested_simplify(_A , decimals=4) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.9_985 _SCREAMING_SNAKE_CASE : List[str] = """facebook/detr-resnet-50""" _SCREAMING_SNAKE_CASE : List[Any] = pipeline("""object-detection""" , model=_A) _SCREAMING_SNAKE_CASE : Dict = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=_A) self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) @require_torch @require_pytesseract @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = """Narsil/layoutlmv3-finetuned-funsd""" _SCREAMING_SNAKE_CASE : Dict = 0.9_993 
_SCREAMING_SNAKE_CASE : Any = pipeline("""object-detection""" , model=_A , threshold=_A) _SCREAMING_SNAKE_CASE : int = object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""") self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}}, {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}}, ] , )
code_codestyle: 635
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
style_context_codestyle: 635
label: 1
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json''' }, '''merges_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt''' }, } lowerCAmelCase_ = {'''allegro/herbert-base-cased''': 514} lowerCAmelCase_ = {} class _snake_case ( __snake_case ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_INIT_CONFIGURATION a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = HerbertTokenizer def __init__( self : int , _A : List[Any]=None , _A : Optional[Any]=None , _A : int=None , _A : Any="<s>" , _A : Union[str, Any]="<unk>" , _A : List[Any]="<pad>" , _A : str="<mask>" , _A : Any="</s>" , **_A : str , ): """simple docstring""" super().__init__( _A , _A , tokenizer_file=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , sep_token=_A , **_A , ) def _lowerCAmelCase ( self : Any , _A : List[int] , _A : Optional[List[int]] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id] _SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCAmelCase ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is None: return [1] + ([0] * len(_A)) + [1] return [1] + ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1] def _lowerCAmelCase ( self : int , _A : List[int] , _A : Optional[List[int]] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] _SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _lowerCAmelCase ( self : Any , _A : str , _A : Optional[str] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self._tokenizer.model.save(_A , name=_A) return tuple(_A)
code_codestyle: 635
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
style_context_codestyle: 635
label: 1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class _snake_case : """simple docstring""" a = None a = None a = None # sigma(t_i) @classmethod def _lowerCAmelCase ( cls : Optional[int]): """simple docstring""" return cls() @dataclass class _snake_case ( __snake_case ): """simple docstring""" a = 42 a = 42 a = 42 class _snake_case ( __snake_case , __snake_case ): """simple docstring""" @property def _lowerCAmelCase ( self : Tuple): """simple docstring""" return True @register_to_config def __init__( self : Dict , _A : float = 0.02 , _A : float = 1_0_0 , _A : float = 1.007 , _A : float = 8_0 , _A : float = 0.05 , _A : float = 5_0 , ): """simple docstring""" pass def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return KarrasVeSchedulerState.create() def _lowerCAmelCase ( self : List[Any] , _A : KarrasVeSchedulerState , _A : int , _A : Tuple = ()): """simple docstring""" _SCREAMING_SNAKE_CASE : str = jnp.arange(0 , _A)[::-1].copy() _SCREAMING_SNAKE_CASE : str = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=_A , schedule=jnp.array(_A , dtype=jnp.floataa) , timesteps=_A , ) def _lowerCAmelCase ( self : str , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : random.KeyArray , ): """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: _SCREAMING_SNAKE_CASE : int = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1) else: _SCREAMING_SNAKE_CASE : List[Any] = 0 # sample eps ~ N(0, S_noise^2 * I) _SCREAMING_SNAKE_CASE : Dict = random.split(_A , num=1) _SCREAMING_SNAKE_CASE : Any = self.config.s_noise * random.normal(key=_A , shape=sample.shape) _SCREAMING_SNAKE_CASE : Tuple = sigma + gamma * sigma _SCREAMING_SNAKE_CASE : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _lowerCAmelCase ( self : Tuple , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : float , _A : jnp.ndarray , _A : bool = True , ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = sample_hat + sigma_hat * model_output _SCREAMING_SNAKE_CASE : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat _SCREAMING_SNAKE_CASE : Dict = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A) def _lowerCAmelCase ( self : Union[str, Any] , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : float , _A : jnp.ndarray , _A : jnp.ndarray , _A : jnp.ndarray , _A : bool = True , ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = sample_prev + sigma_prev * model_output _SCREAMING_SNAKE_CASE : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev _SCREAMING_SNAKE_CASE : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A) def _lowerCAmelCase ( self : Optional[int] , _A : KarrasVeSchedulerState , _A : int , _A : Optional[Any] , _A : Union[str, Any]): """simple 
docstring""" raise NotImplementedError()
code_codestyle: 635
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
style_context_codestyle: 635
label: 1
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int] , _A : List[Any]): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""]): _SCREAMING_SNAKE_CASE : Union[str, Any] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_A) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = """sgugger/tiny-distilbert-classification""" _SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , only_pretrain_model=_A , ) _SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmark(_A , [config]) _SCREAMING_SNAKE_CASE : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A , [config]) _SCREAMING_SNAKE_CASE : List[str] = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : str = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : str = TensorFlowBenchmark(_A , [config]) _SCREAMING_SNAKE_CASE : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = """patrickvonplaten/t5-tiny-random""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A , configs=[config]) _SCREAMING_SNAKE_CASE : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""")) == 0 , """Cannot do xla on CPU.""") def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : int = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=_A , save_to_csv=_A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_A , """inf_time.csv""") , inference_memory_csv_file=os.path.join(_A , """inf_mem.csv""") , env_info_csv_file=os.path.join(_A , """env.csv""") , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmark(_A) benchmark.run() self.assertTrue(Path(os.path.join(_A , """inf_time.csv""")).exists()) self.assertTrue(Path(os.path.join(_A , """inf_mem.csv""")).exists()) self.assertTrue(Path(os.path.join(_A , """env.csv""")).exists()) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_A : 
Any): self.assertTrue(hasattr(_A , """sequential""")) self.assertTrue(hasattr(_A , """cumulative""")) self.assertTrue(hasattr(_A , """current""")) self.assertTrue(hasattr(_A , """total""")) with tempfile.TemporaryDirectory() as tmp_dir: _SCREAMING_SNAKE_CASE : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_A , """log.txt""") , log_print=_A , trace_memory_line_by_line=_A , eager_mode=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Any = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Tuple = benchmark.run() _check_summary_is_not_empty(result.inference_summary) self.assertTrue(Path(os.path.join(_A , """log.txt""")).exists())
635
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projection_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projection_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" in key and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the original CLAP checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
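The `rename_state_dict` logic above slices a fused attention projection into query/key/value chunks. A self-contained sketch of that split, with an illustrative 64-dimensional hidden size:

import torch

# Stacked (q; k; v) projection weight, as stored in the original CLAP checkpoint.
mixed_qkv = torch.randn(3 * 64, 64)
qkv_dim = mixed_qkv.size(0) // 3

query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]

# Each slice is a full projection matrix of shape (hidden, hidden).
assert query_layer.shape == key_layer.shape == value_layer.shape == (64, 64)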
635
1
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class _snake_case ( __snake_case ): """simple docstring""" a = "encodec" def __init__( self : Union[str, Any] , _A : int=[1.5, 3.0, 6.0, 12.0, 24.0] , _A : Any=2_4_0_0_0 , _A : Dict=1 , _A : Union[str, Any]=False , _A : Optional[Any]=None , _A : Optional[int]=None , _A : List[Any]=1_2_8 , _A : Union[str, Any]=3_2 , _A : Tuple=1 , _A : int=[8, 5, 4, 2] , _A : List[Any]="weight_norm" , _A : Any=7 , _A : Any=7 , _A : Any=3 , _A : Dict=2 , _A : List[str]=True , _A : int="reflect" , _A : str=2 , _A : Union[str, Any]=2 , _A : Dict=1.0 , _A : List[str]=1_0_2_4 , _A : List[Any]=None , _A : Any=True , **_A : Union[str, Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = target_bandwidths _SCREAMING_SNAKE_CASE : Dict = sampling_rate _SCREAMING_SNAKE_CASE : Tuple = audio_channels _SCREAMING_SNAKE_CASE : Any = normalize _SCREAMING_SNAKE_CASE : int = chunk_length_s _SCREAMING_SNAKE_CASE : List[str] = overlap _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size _SCREAMING_SNAKE_CASE : List[Any] = num_filters _SCREAMING_SNAKE_CASE : str = num_residual_layers _SCREAMING_SNAKE_CASE : Union[str, Any] = upsampling_ratios _SCREAMING_SNAKE_CASE : int = norm_type _SCREAMING_SNAKE_CASE : List[Any] = kernel_size _SCREAMING_SNAKE_CASE : List[Any] = last_kernel_size _SCREAMING_SNAKE_CASE : Optional[int] = residual_kernel_size _SCREAMING_SNAKE_CASE : List[str] = dilation_growth_rate _SCREAMING_SNAKE_CASE : List[str] = use_causal_conv _SCREAMING_SNAKE_CASE : int = pad_mode _SCREAMING_SNAKE_CASE : List[Any] = compress _SCREAMING_SNAKE_CASE : List[Any] = num_lstm_layers _SCREAMING_SNAKE_CASE : List[str] = trim_right_ratio _SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_size _SCREAMING_SNAKE_CASE : str = codebook_dim if codebook_dim is not None else hidden_size _SCREAMING_SNAKE_CASE : Any = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""") super().__init__(**_A) @property def _lowerCAmelCase ( self : List[Any]): """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) @property def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length)) @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length) @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
635
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
1
"""simple docstring""" import requests lowerCAmelCase_ = '''YOUR API KEY''' def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = giphy_api_key )-> list: _SCREAMING_SNAKE_CASE : List[Any] = """+""".join(query.split() ) _SCREAMING_SNAKE_CASE : Optional[int] = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}""" _SCREAMING_SNAKE_CASE : Dict = requests.get(__SCREAMING_SNAKE_CASE ).json()["""data"""] return [gif["url"] for gif in gifs] if __name__ == "__main__": print('''\n'''.join(get_gifs('''space ship''')))
635
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
1
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCamelCase_()-> Iterator[int]: _SCREAMING_SNAKE_CASE : List[str] = 2 while True: if is_prime(__SCREAMING_SNAKE_CASE ): yield num num += 1 def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 2_000_000 )-> int: return sum(takewhile(lambda __SCREAMING_SNAKE_CASE : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"{solution() = }")
635
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
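The core of the fine-tuning loop above is the secondary-learner filter: a context enters the batch only if its predicted information gain clears a threshold, and the threshold is relaxed after 10 batches. A loose pure-Python sketch of that gating, with made-up predicted values and the per-batch step counting simplified to per-example:

predicted_igs = [1.4, 0.2, -0.5, 1.1, 0.9, -1.2]
threshold, batch, global_step = 1.0, [], 0

for predicted_q in predicted_igs:
    if global_step == 10:
        threshold = -1  # decay selectivity, as in the loop above
    if predicted_q >= threshold:  # the loop above skips backprop below threshold
        batch.append(predicted_q)
        global_step += 1

print(batch)  # only the contexts that passed the filter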
635
1
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCAmelCase_ = re.compile(R'''\s+''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: return {"hash": hashlib.mda(re.sub(__SCREAMING_SNAKE_CASE , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = [len(__SCREAMING_SNAKE_CASE ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(__SCREAMING_SNAKE_CASE ), "line_max": max(__SCREAMING_SNAKE_CASE )} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : List[Any] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 )-> Any: _SCREAMING_SNAKE_CASE : Any = ["""auto-generated""", """autogenerated""", """automatically generated"""] _SCREAMING_SNAKE_CASE : Union[str, Any] = example["""content"""].splitlines() for _, line in zip(range(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=0.05 )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[Any] = ["""unit tests""", """test file""", """configuration file"""] _SCREAMING_SNAKE_CASE : Optional[int] = example["""content"""].splitlines() _SCREAMING_SNAKE_CASE : Dict = 0 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 # first test for _, line in zip(range(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test _SCREAMING_SNAKE_CASE : List[Any] = example["""content"""].count("""\n""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Any = ["""def """, """class """, """for """, """while """] _SCREAMING_SNAKE_CASE : List[str] = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=4 )-> Dict: _SCREAMING_SNAKE_CASE : Any = example["""content"""].splitlines() _SCREAMING_SNAKE_CASE : Tuple = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : int = tokenizer(example["""content"""] , truncation=__SCREAMING_SNAKE_CASE )["""input_ids"""] _SCREAMING_SNAKE_CASE : int = len(example["""content"""] ) / 
len(__SCREAMING_SNAKE_CASE ) return {"ratio": ratio} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : List[str] = {} results.update(get_hash(__SCREAMING_SNAKE_CASE ) ) results.update(line_stats(__SCREAMING_SNAKE_CASE ) ) results.update(alpha_stats(__SCREAMING_SNAKE_CASE ) ) results.update(char_token_ratio(__SCREAMING_SNAKE_CASE ) ) results.update(is_autogenerated(__SCREAMING_SNAKE_CASE ) ) results.update(is_config_or_test(__SCREAMING_SNAKE_CASE ) ) results.update(has_no_keywords(__SCREAMING_SNAKE_CASE ) ) results.update(has_few_assignments(__SCREAMING_SNAKE_CASE ) ) return results def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: if not check_uniques(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f_in: with gzip.open(str(__SCREAMING_SNAKE_CASE ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) os.unlink(__SCREAMING_SNAKE_CASE ) # Settings lowerCAmelCase_ = HfArgumentParser(PreprocessingArguments) lowerCAmelCase_ = parser.parse_args() if args.num_workers is None: lowerCAmelCase_ = multiprocessing.cpu_count() lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCAmelCase_ = time.time() lowerCAmelCase_ = load_dataset(args.dataset_name, split='''train''') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCAmelCase_ = time.time() lowerCAmelCase_ = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCAmelCase_ = set(ds.unique('''hash''')) lowerCAmelCase_ = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCAmelCase_ = time.time() lowerCAmelCase_ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCAmelCase_ = time.time() lowerCAmelCase_ , lowerCAmelCase_ = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCAmelCase_ = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowerCAmelCase_ = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowerCAmelCase_ = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCAmelCase_ = 
str(data_dir / F"file-{file_number+1:012}.json") lowerCAmelCase_ = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
635
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
1
"""simple docstring""" from maths.prime_check import is_prime def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = F"""Input value of [number={number}] must be an integer""" raise TypeError(__SCREAMING_SNAKE_CASE ) if is_prime(__SCREAMING_SNAKE_CASE ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
635
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
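The renaming pass in the conversion script above is a chain of substring substitutions followed by a split of fused `in_proj_weight` tensors into q/k/v slices. A standalone illustration with two of the rules and illustrative tensor sizes:

import torch

RENAME_RULES = [("emb", "model.decoder.embed_tokens"), ("norm1", "self_attn_layer_norm")]

state_dict = {
    "emb.weight": torch.zeros(4, 8),
    "layers.0.in_proj_weight": torch.randn(3 * 8, 8),
}
renamed, hidden_size = {}, 8
for key, val in state_dict.items():
    for old, new in RENAME_RULES:
        key = key.replace(old, new)
    if "in_proj_weight" in key:
        # Split the fused projection into equal query/key/value chunks.
        renamed[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
        renamed[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
        renamed[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
    else:
        renamed[key] = val

print(sorted(renamed))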
635
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None )-> Dict: if attention_mask is None: _SCREAMING_SNAKE_CASE : str = tf.cast(tf.math.not_equal(__SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class _snake_case : """simple docstring""" a = OPTConfig a = {} a = "gelu" def __init__( self : Any , _A : Optional[Any] , _A : Optional[int]=1_3 , _A : Optional[Any]=7 , _A : List[str]=True , _A : Tuple=False , _A : int=9_9 , _A : Dict=1_6 , _A : List[Any]=2 , _A : Tuple=4 , _A : List[str]=4 , _A : List[Any]="gelu" , _A : int=0.1 , _A : List[Any]=0.1 , _A : Dict=2_0 , _A : int=2 , _A : Optional[Any]=1 , _A : str=0 , _A : Tuple=1_6 , _A : Optional[Any]=1_6 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = parent _SCREAMING_SNAKE_CASE : Dict = batch_size _SCREAMING_SNAKE_CASE : Dict = seq_length _SCREAMING_SNAKE_CASE : str = is_training _SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels _SCREAMING_SNAKE_CASE : List[str] = vocab_size _SCREAMING_SNAKE_CASE : int = hidden_size _SCREAMING_SNAKE_CASE : str = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = num_attention_heads _SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : int = max_position_embeddings _SCREAMING_SNAKE_CASE : List[str] = eos_token_id _SCREAMING_SNAKE_CASE : List[Any] = pad_token_id _SCREAMING_SNAKE_CASE : Optional[int] = bos_token_id _SCREAMING_SNAKE_CASE : Any = embed_dim _SCREAMING_SNAKE_CASE : str = word_embed_proj_dim _SCREAMING_SNAKE_CASE : List[str] = False def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) _SCREAMING_SNAKE_CASE : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) _SCREAMING_SNAKE_CASE : str = tf.concat([input_ids, eos_tensor] , axis=1) _SCREAMING_SNAKE_CASE : Dict = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_A , **self.config_updates , ) _SCREAMING_SNAKE_CASE : str = prepare_opt_inputs_dict(_A , _A) return config, inputs_dict def _lowerCAmelCase ( self : List[str] , _A : Tuple , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = TFOPTModel(config=_A) _SCREAMING_SNAKE_CASE : Any = 
inputs_dict["""input_ids"""] _SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[:1, :] _SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["""attention_mask"""][:1, :] _SCREAMING_SNAKE_CASE : Dict = 1 # first forward pass _SCREAMING_SNAKE_CASE : List[Any] = model(_A , attention_mask=_A , use_cache=_A) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 3) , config.vocab_size) _SCREAMING_SNAKE_CASE : int = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and _SCREAMING_SNAKE_CASE : Any = tf.concat([input_ids, next_tokens] , axis=-1) _SCREAMING_SNAKE_CASE : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1) _SCREAMING_SNAKE_CASE : List[str] = model(_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : List[Any] = model(_A , attention_mask=_A , past_key_values=_A)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice _SCREAMING_SNAKE_CASE : Any = int(ids_tensor((1,) , output_from_past.shape[-1])) _SCREAMING_SNAKE_CASE : Dict = output_from_no_past[:, -3:, random_slice_idx] _SCREAMING_SNAKE_CASE : str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_A , _A , rtol=1e-3) @require_tf class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () a = (TFOPTForCausalLM,) if is_tf_available() else () a = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) a = False a = False a = False a = 10 def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = TFOPTModelTester(self) _SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=_A) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(_A : str , _A : Union[str, Any]): if hasattr(_A , """weight"""): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(_A , """weight"""): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]: # build the embeddings _SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A) _SCREAMING_SNAKE_CASE : int = _get_word_embedding_weight(_A , model.get_input_embeddings()) _SCREAMING_SNAKE_CASE : List[Any] = _get_word_embedding_weight(_A , model.get_output_embeddings()) # reshape the embeddings model.resize_token_embeddings(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = _get_word_embedding_weight(_A , model.get_input_embeddings()) _SCREAMING_SNAKE_CASE : Tuple = _get_word_embedding_weight(_A , model.get_output_embeddings()) # check that the resized embeddings size matches the desired size. 
_SCREAMING_SNAKE_CASE : Tuple = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , _A) # check that weights remain the same after resizing _SCREAMING_SNAKE_CASE : Optional[int] = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0: _SCREAMING_SNAKE_CASE : Optional[int] = False self.assertTrue(_A) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , _A) _SCREAMING_SNAKE_CASE : Tuple = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0: _SCREAMING_SNAKE_CASE : int = False self.assertTrue(_A) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: return tf.constant(__SCREAMING_SNAKE_CASE , dtype=tf.intaa ) @require_tf class _snake_case ( unittest.TestCase ): """simple docstring""" a = 99 def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : int = tf.ones((4, 1) , dtype=tf.intaa) * 2 _SCREAMING_SNAKE_CASE : str = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1) _SCREAMING_SNAKE_CASE : Tuple = input_ids.shape[0] _SCREAMING_SNAKE_CASE : Union[str, Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = TFOPTModel.from_pretrained("""facebook/opt-350m""") _SCREAMING_SNAKE_CASE : Any = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]]) _SCREAMING_SNAKE_CASE : Tuple = tf.not_equal(_A , model.config.pad_token_id) with tf.GradientTape(): _SCREAMING_SNAKE_CASE : Tuple = model(input_ids=_A , attention_mask=_A).last_hidden_state _SCREAMING_SNAKE_CASE : Any = (1, 1_1, 5_1_2) self.assertEqual(output.shape , _A) _SCREAMING_SNAKE_CASE : Tuple = tf.constant( [[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]]) self.assertTrue(np.allclose(output[:, :3, :3] , _A , atol=4e-3)) _SCREAMING_SNAKE_CASE : Optional[int] = tf.function(_A , jit_compile=_A) _SCREAMING_SNAKE_CASE : Dict = xla_generate(_A , _A)[0] self.assertTrue(np.allclose(output[:, :3, :3] , _A , atol=4e-2)) @require_tf @slow class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" super().setUp() _SCREAMING_SNAKE_CASE : Optional[int] = """facebook/opt-350m""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = TFOPTForCausalLM.from_pretrained(self.path_model) _SCREAMING_SNAKE_CASE : int = GPTaTokenizer.from_pretrained(self.path_model) _SCREAMING_SNAKE_CASE : Dict = [ """Today is a beautiful day and I want to""", """In the city of""", """Paris is the capital of France and""", """Computers and mobile phones have taken""", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""tf""" , padding=_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : List[str] = tf.math.reduce_mean(model(inputs.input_ids , 
attention_mask=inputs.attention_mask)[0] , axis=-1) _SCREAMING_SNAKE_CASE : int = tf.constant( [ [1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670], [-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822], [0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703], [6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477], ]) self.assertTrue(np.allclose(_A , _A , atol=1e-4)) _SCREAMING_SNAKE_CASE : List[Any] = tf.function(_A , jit_compile=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1) self.assertTrue(np.allclose(_A , _A , atol=1e-4)) @require_tf @slow class _snake_case ( unittest.TestCase ): """simple docstring""" @property def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = """facebook/opt-125m""" _SCREAMING_SNAKE_CASE : Dict = [ """Today is a beautiful day and I want to""", """In the city of New York, the city""", """Paris is the capital of France and the capital""", """Computers and mobile phones have taken over the""", ] _SCREAMING_SNAKE_CASE : List[Any] = [] _SCREAMING_SNAKE_CASE : List[str] = GPTaTokenizer.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Tuple = TFOPTForCausalLM.from_pretrained(_A) for prompt in self.prompts: _SCREAMING_SNAKE_CASE : List[Any] = tokenizer(_A , return_tensors="""tf""").input_ids _SCREAMING_SNAKE_CASE : Any = model.generate(_A , max_length=1_0) _SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(_A , skip_special_tokens=_A) predicted_outputs += generated_string self.assertListEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = """facebook/opt-350m""" _SCREAMING_SNAKE_CASE : Dict = GPTaTokenizer.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Any = TFOPTForCausalLM.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[int] = """left""" # use different length sentences to test batching _SCREAMING_SNAKE_CASE : int = [ """Hello, my dog is a little""", """Today, I""", ] _SCREAMING_SNAKE_CASE : Any = tokenizer(_A , return_tensors="""tf""" , padding=_A) _SCREAMING_SNAKE_CASE : int = inputs["""input_ids"""] _SCREAMING_SNAKE_CASE : List[str] = model.generate(input_ids=_A , attention_mask=inputs["""attention_mask"""]) _SCREAMING_SNAKE_CASE : Tuple = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _SCREAMING_SNAKE_CASE : List[Any] = model.generate(input_ids=_A) _SCREAMING_SNAKE_CASE : str = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["""attention_mask"""][-1] , tf.intaa)) _SCREAMING_SNAKE_CASE : List[Any] = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _SCREAMING_SNAKE_CASE : Dict = model.generate(input_ids=_A , max_length=model.config.max_length - num_paddings) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(_A , skip_special_tokens=_A) _SCREAMING_SNAKE_CASE : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_A) _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=_A) _SCREAMING_SNAKE_CASE : int = [ """Hello, my dog is a little bit of a dork.\nI'm a little bit""", """Today, I was in the middle of a 
conversation with a friend about the""", ] self.assertListEqual(_A , _A) self.assertListEqual(_A , [non_padded_sentence, padded_sentence]) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = """facebook/opt-350m""" _SCREAMING_SNAKE_CASE : Optional[Any] = [ """Today is a beautiful day and I want to""", """In the city of San Francisco, the city""", """Paris is the capital of France and the capital""", """Computers and mobile phones have taken over the""", ] _SCREAMING_SNAKE_CASE : Tuple = [] _SCREAMING_SNAKE_CASE : str = GPTaTokenizer.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Tuple = TFOPTForCausalLM.from_pretrained(_A) for prompt in self.prompts: _SCREAMING_SNAKE_CASE : List[Any] = tokenizer(_A , return_tensors="""tf""").input_ids _SCREAMING_SNAKE_CASE : Tuple = model.generate(_A , max_length=1_0) _SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(_A , skip_special_tokens=_A) predicted_outputs += generated_string self.assertListEqual(_A , _A)
635
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
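The `inputs_to_logits_ratio` property above is simply the product of the convolutional strides: it tells you by how much the feature extractor downsamples the raw waveform. A minimal sketch of that arithmetic (the stride tuple is the default from the config above; the 16 kHz one-second sample count is an illustrative assumption):

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)  # 5 * 2**6 = 320
num_samples = 16_000  # one second of 16 kHz audio (assumed for illustration)
print(num_samples // ratio)  # -> 50 feature frames before the squeeze factor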
635
1
"""simple docstring""" import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def lowerCamelCase_()-> None: print("""Making key files...""" ) make_key_files("""rsa""" , 1_024 ) print("""Key files generation successful.""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> tuple[tuple[int, int], tuple[int, int]]: print("""Generating prime p...""" ) _SCREAMING_SNAKE_CASE : Dict = rabinMiller.generate_large_prime(__SCREAMING_SNAKE_CASE ) print("""Generating prime q...""" ) _SCREAMING_SNAKE_CASE : int = rabinMiller.generate_large_prime(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = p * q print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" ) while True: _SCREAMING_SNAKE_CASE : Union[str, Any] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(__SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) ) == 1: break print("""Calculating d that is mod inverse of e...""" ) _SCREAMING_SNAKE_CASE : int = cryptoMath.find_mod_inverse(__SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) ) _SCREAMING_SNAKE_CASE : Optional[Any] = (n, e) _SCREAMING_SNAKE_CASE : List[Any] = (n, d) return (public_key, private_key) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> None: if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ): print("""\nWARNING:""" ) print( F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" """Use a different name or delete these files and re-run this program.""" ) sys.exit() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = generate_key(__SCREAMING_SNAKE_CASE ) print(F"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(F"""{name}_pubkey.txt""" , """w""" ) as out_file: out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" ) print(F"""Writing private key to file {name}_privkey.txt...""" ) with open(F"""{name}_privkey.txt""" , """w""" ) as out_file: out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" ) if __name__ == "__main__": main()
635
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
1
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Any: if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.Iterable ): return x return (x, x) @require_flax class _snake_case : """simple docstring""" def _lowerCAmelCase ( self : Tuple , _A : Tuple , _A : str): """simple docstring""" pass def _lowerCAmelCase ( self : List[Any]): """simple docstring""" pass def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" pass def _lowerCAmelCase ( self : int , _A : np.ndarray , _A : np.ndarray , _A : float): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = np.abs((a - b)).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""") def _lowerCAmelCase ( self : Any , _A : List[str] , _A : Dict , _A : str , _A : List[Any] , _A : Dict=None , **_A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A) _SCREAMING_SNAKE_CASE : str = FlaxVisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : str = model(input_ids=_A , pixel_values=_A , attention_mask=_A) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim)) def _lowerCAmelCase ( self : Optional[Any] , _A : List[Any] , _A : str , _A : Optional[Any] , _A : Dict , _A : Optional[Any]=None , **_A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self.get_vision_text_model(_A , _A) _SCREAMING_SNAKE_CASE : Dict = {"""vision_model""": vision_model, """text_model""": text_model} _SCREAMING_SNAKE_CASE : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim)) def _lowerCAmelCase ( self : Tuple , _A : Tuple , _A : List[Any] , _A : Dict , _A : List[str] , _A : int=None , **_A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.get_vision_text_model(_A , _A) _SCREAMING_SNAKE_CASE : int = {"""vision_model""": vision_model, """text_model""": text_model} _SCREAMING_SNAKE_CASE : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A) 
_SCREAMING_SNAKE_CASE : List[Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A) _SCREAMING_SNAKE_CASE : str = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A) _SCREAMING_SNAKE_CASE : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(_A) _SCREAMING_SNAKE_CASE : str = model(input_ids=_A , pixel_values=_A , attention_mask=_A) _SCREAMING_SNAKE_CASE : List[Any] = after_output[0] _SCREAMING_SNAKE_CASE : Optional[int] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(_A , 1e-3) def _lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None , **_A : str): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_vision_text_model(_A , _A) _SCREAMING_SNAKE_CASE : str = {"""vision_model""": vision_model, """text_model""": text_model} _SCREAMING_SNAKE_CASE : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A) _SCREAMING_SNAKE_CASE : List[Any] = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A) _SCREAMING_SNAKE_CASE : Dict = output.vision_model_output.attentions self.assertEqual(len(_A) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _SCREAMING_SNAKE_CASE : Tuple = to_atuple(vision_model.config.image_size) _SCREAMING_SNAKE_CASE : List[str] = to_atuple(vision_model.config.patch_size) _SCREAMING_SNAKE_CASE : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) _SCREAMING_SNAKE_CASE : Dict = output.text_model_output.attentions self.assertEqual(len(_A) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCAmelCase ( self : List[str] , _A : List[str] , _A : Optional[Any] , _A : Tuple): """simple docstring""" pt_model.to(_A) pt_model.eval() # prepare inputs _SCREAMING_SNAKE_CASE : Any = inputs_dict _SCREAMING_SNAKE_CASE : Dict = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = pt_model(**_A).to_tuple() _SCREAMING_SNAKE_CASE : Dict = fx_model(**_A).to_tuple() self.assertEqual(len(_A) , len(_A) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]): self.assert_almost_equals(_A , pt_output.numpy() , 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_A) _SCREAMING_SNAKE_CASE : int = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A) _SCREAMING_SNAKE_CASE : Dict = fx_model_loaded(**_A).to_tuple() self.assertEqual(len(_A) , len(_A) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]): self.assert_almost_equals(_A , pt_output.numpy() , 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_A) _SCREAMING_SNAKE_CASE : Any = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A) pt_model_loaded.to(_A) pt_model_loaded.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[Any] = pt_model_loaded(**_A).to_tuple() self.assertEqual(len(_A) , len(_A) , """Output lengths differ between Flax and 
PyTorch""") for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]): self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4e-2) def _lowerCAmelCase ( self : List[Any] , _A : int , _A : List[Any] , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A) _SCREAMING_SNAKE_CASE : Any = VisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxVisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A) _SCREAMING_SNAKE_CASE : List[Any] = fx_state self.check_pt_flax_equivalence(_A , _A , _A) def _lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A) _SCREAMING_SNAKE_CASE : List[str] = VisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = FlaxVisionTextDualEncoderModel(_A) _SCREAMING_SNAKE_CASE : Any = load_flax_weights_in_pytorch_model(_A , fx_model.params) self.check_pt_flax_equivalence(_A , _A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() self.check_save_load(**_A) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A) @is_pt_flax_cross_test def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Optional[int] = config_inputs_dict.pop("""vision_config""") _SCREAMING_SNAKE_CASE : Optional[int] = config_inputs_dict.pop("""text_config""") _SCREAMING_SNAKE_CASE : Union[str, Any] = config_inputs_dict self.check_equivalence_pt_to_flax(_A , _A , _A) self.check_equivalence_flax_to_pt(_A , _A , _A) @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.get_pretrained_model_and_inputs() _SCREAMING_SNAKE_CASE : List[Any] = model_a(**_A) _SCREAMING_SNAKE_CASE : Optional[int] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = model_a(**_A) _SCREAMING_SNAKE_CASE : List[Any] = after_outputs[0] _SCREAMING_SNAKE_CASE : Tuple = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(_A , 1e-5) @require_flax class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_A , text_from_pt=_A , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = 1_3 _SCREAMING_SNAKE_CASE : Dict = floats_tensor( [ batch_size, model.config.vision_config.num_channels, 
model.config.vision_config.image_size, model.config.vision_config.image_size, ]) _SCREAMING_SNAKE_CASE : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size) _SCREAMING_SNAKE_CASE : Union[str, Any] = random_attention_mask([batch_size, 4]) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxViTModel(_A) _SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel(_A) return vision_model, text_model def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = FlaxViTModelTester(self) _SCREAMING_SNAKE_CASE : List[Any] = FlaxBertModelTester(self) _SCREAMING_SNAKE_CASE : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Optional[Any] = bert_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = vision_config_and_inputs _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_A , text_from_pt=_A , ) _SCREAMING_SNAKE_CASE : Any = 1_3 _SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ]) _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size) _SCREAMING_SNAKE_CASE : Dict = random_attention_mask([batch_size, 4]) _SCREAMING_SNAKE_CASE : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _lowerCAmelCase ( self : Any , _A : Optional[Any] , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = FlaxCLIPVisionModel(_A) _SCREAMING_SNAKE_CASE : Dict = FlaxBertModel(_A) return vision_model, text_model def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxCLIPVisionModelTester(self) _SCREAMING_SNAKE_CASE : int = FlaxBertModelTester(self) _SCREAMING_SNAKE_CASE : Any = clip_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : str = bert_model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = vision_config_and_inputs _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" 
@slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0) _SCREAMING_SNAKE_CASE : List[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""") _SCREAMING_SNAKE_CASE : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") _SCREAMING_SNAKE_CASE : Any = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=_A , padding=_A , return_tensors="""np""") _SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_A) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _SCREAMING_SNAKE_CASE : Dict = np.array([[1.2_284_727, 0.3_104_122]]) self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1e-3))
635
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
635
1
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) _SCREAMING_SNAKE_CASE : Any = sorted(string.lower() ) return len(__SCREAMING_SNAKE_CASE ) == len(set(__SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": lowerCAmelCase_ = input('''Enter a string ''').strip() lowerCAmelCase_ = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
635
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
635
1
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels lowerCAmelCase_ = object() # For specifying empty leaf dict `{}` lowerCAmelCase_ = object() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : List[str] = tuple((re.compile(x + """$""" ) for x in qs) ) for i in range(len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE ) + 1 ): _SCREAMING_SNAKE_CASE : List[Any] = [x.match(__SCREAMING_SNAKE_CASE ) for x, y in zip(__SCREAMING_SNAKE_CASE , ks[i:] )] if matches and all(__SCREAMING_SNAKE_CASE ): return True return False def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for rule, replacement in rules: if _match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return replacement return val return replace def lowerCamelCase_()-> Optional[int]: return [ # embeddings (("transformer", "wpe", "embedding"), P("""mp""" , __SCREAMING_SNAKE_CASE )), (("transformer", "wte", "embedding"), P("""mp""" , __SCREAMING_SNAKE_CASE )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__SCREAMING_SNAKE_CASE , """mp""" )), (("attention", "out_proj", "kernel"), P("""mp""" , __SCREAMING_SNAKE_CASE )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__SCREAMING_SNAKE_CASE , """mp""" )), (("mlp", "c_fc", "bias"), P("""mp""" )), (("mlp", "c_proj", "kernel"), P("""mp""" , __SCREAMING_SNAKE_CASE )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Tuple = _get_partition_rules() _SCREAMING_SNAKE_CASE : Optional[Any] = _replacement_rules(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = {k: _unmatched for k in flatten_dict(__SCREAMING_SNAKE_CASE )} _SCREAMING_SNAKE_CASE : Optional[Any] = {k: replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__SCREAMING_SNAKE_CASE ) )
635
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
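The iterator-streamer pattern exercised above is the standard way to stream generated text out of a background thread. A hedged minimal sketch of the same pattern in application code (the model name and prompt are assumptions):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")  # assumed small model
model = AutoModelForCausalLM.from_pretrained("distilgpt2")

inputs = tokenizer("Streaming lets the UI show", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generation_kwargs = dict(**inputs, max_new_tokens=20, do_sample=False, streamer=streamer)

Thread(target=model.generate, kwargs=generation_kwargs).start()  # generate in the background
for chunk in streamer:  # consume decoded text as it becomes available
    print(chunk, end="", flush=True)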
635
1
"""simple docstring""" from __future__ import annotations lowerCAmelCase_ = 1.6021E-19 # units = C def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple[str, float]: if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif conductivity < 0: raise ValueError("""Conductivity cannot be negative""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative""" ) elif mobility < 0: raise ValueError("""mobility cannot be negative""" ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
635
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''', } class _snake_case ( __snake_case ): """simple docstring""" a = "bloom" a = ["past_key_values"] a = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self : Union[str, Any] , _A : List[Any]=2_5_0_8_8_0 , _A : Optional[int]=6_4 , _A : List[Any]=2 , _A : int=8 , _A : int=1e-5 , _A : Tuple=0.02 , _A : List[str]=True , _A : Dict=1 , _A : List[Any]=2 , _A : Any=False , _A : Dict=0.0 , _A : Tuple=0.0 , _A : Any=1 , _A : List[str]=False , **_A : str , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = vocab_size # Backward compatibility with n_embed kwarg _SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""n_embed""" , _A) _SCREAMING_SNAKE_CASE : Optional[int] = hidden_size if n_embed is None else n_embed _SCREAMING_SNAKE_CASE : Union[str, Any] = n_layer _SCREAMING_SNAKE_CASE : Dict = n_head _SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon _SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = use_cache _SCREAMING_SNAKE_CASE : Any = pretraining_tp _SCREAMING_SNAKE_CASE : Tuple = apply_residual_connection_post_layernorm _SCREAMING_SNAKE_CASE : List[str] = hidden_dropout _SCREAMING_SNAKE_CASE : str = attention_dropout _SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id _SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id _SCREAMING_SNAKE_CASE : Tuple = slow_but_exact super().__init__(bos_token_id=_A , eos_token_id=_A , **_A) class _snake_case ( __snake_case ): """simple docstring""" a = version.parse("1.12" ) def __init__( self : int , _A : PretrainedConfig , _A : str = "default" , _A : List[PatchingSpec] = None , _A : bool = False , ): """simple docstring""" super().__init__(_A , task=_A , patching_specs=_A , use_past=_A) if not getattr(self._config , """pad_token_id""" , _A): # TODO: how to do that better? _SCREAMING_SNAKE_CASE : Optional[int] = 0 @property def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}}) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_A , direction="""inputs""" , inverted_values_shape=_A) _SCREAMING_SNAKE_CASE : Any = {0: """batch""", 1: """past_sequence + sequence"""} else: _SCREAMING_SNAKE_CASE : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def _lowerCAmelCase ( self : Tuple): """simple docstring""" return self._config.n_layer @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" return self._config.n_head @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" return 1e-3 def _lowerCAmelCase ( self : List[str] , _A : "PreTrainedTokenizer" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = super(_A , self).generate_dummy_inputs( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A) # We need to order the input in the way they appears in the forward() _SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""") else: import torch _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _SCREAMING_SNAKE_CASE : Dict = seqlen + 2 _SCREAMING_SNAKE_CASE : List[Any] = self._config.hidden_size // self.num_attention_heads _SCREAMING_SNAKE_CASE : Optional[int] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) _SCREAMING_SNAKE_CASE : int = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) _SCREAMING_SNAKE_CASE : List[Any] = [ (torch.zeros(_A), torch.zeros(_A)) for _ in range(self.num_layers) ] _SCREAMING_SNAKE_CASE : Any = common_inputs["""attention_mask"""] if self.use_past: _SCREAMING_SNAKE_CASE : Optional[int] = ordered_inputs["""attention_mask"""].dtype _SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_A , _A , dtype=_A)] , dim=1) return ordered_inputs @property def _lowerCAmelCase ( self : int): """simple docstring""" return 1_3
635
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
635
1
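The Bloom configuration above accepts a deprecated n_embed kwarg as an alias for hidden_size and maps num_hidden_layers / num_attention_heads onto its own n_layer / n_head fields. A small sketch with the released transformers.BloomConfig (the values are arbitrary):

from transformers import BloomConfig

config = BloomConfig(hidden_size=64, n_layer=2, n_head=8)
# attribute_map routes the generic names to Bloom's own field names.
print(config.num_hidden_layers, config.num_attention_heads)  # 2 8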
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowerCAmelCase_ = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , """models/bert/""")) _SCREAMING_SNAKE_CASE : Any = self.transformer_dir shutil.copy( os.path.join(_A , """src/transformers/models/bert/modeling_bert.py""") , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""") , ) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = """src/transformers""" shutil.rmtree(self.transformer_dir) def _lowerCAmelCase ( self : int , _A : int , _A : Optional[int] , _A : Any , _A : Tuple=None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _SCREAMING_SNAKE_CASE : Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _SCREAMING_SNAKE_CASE : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9) _SCREAMING_SNAKE_CASE : List[Any] = black.format_str(_A , mode=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""") with open(_A , """w""" , newline="""\n""") as f: f.write(_A) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A)) == 0) else: check_copies.is_copy_consistent(f.name , overwrite=_A) with open(_A , """r""") as f: self.assertTrue(f.read() , _A) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""") self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _A , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _A) , ) # Copy consistency with a really 
long name _SCREAMING_SNAKE_CASE : Optional[Any] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason""" self.check_copy_consistency( f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub("""Bert""" , _A , _A) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _A , overwrite_result=re.sub("""Bert""" , """TestModel""" , _A) , ) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""] _SCREAMING_SNAKE_CASE : Dict = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the""" """ Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for""" """ Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong""" """ Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.""" """ **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),""" """ released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and""" """ lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same""" """ method has been applied to compress GPT2 into""" """ [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into""" """ [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),""" """ Multilingual BERT into""" """ [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German""" """ version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**""" """ (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders""" """ as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang""" """ Luong, Quoc V. Le, Christopher D. Manning.""" ) _SCREAMING_SNAKE_CASE : List[Any] = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) _SCREAMING_SNAKE_CASE : Any = ( """1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.""" """ **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文""" """ [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and""" """ lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same""" """ method has been applied to compress GPT2 into""" """ [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into""" """ [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),""" """ Multilingual BERT into""" """ [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German""" """ version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自""" """ Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather""" """ than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,""" """ Christopher D. Manning 发布。\n""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = check_copies.convert_to_localized_md( _A , _A , localized_readme["""format_model_list"""]) self.assertFalse(_A) self.assertEqual(_A , _A) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = check_copies.convert_to_localized_md( _A , _A , localized_readme["""format_model_list"""]) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(_A) _SCREAMING_SNAKE_CASE : int = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the""" """ Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for""" """ Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong""" """ Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = ( """1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and""" """ the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = check_copies.convert_to_localized_md( _A , _A , localized_readme["""format_model_list"""]) # Check if the model link is synchronized. self.assertEqual(_A , _A)
635
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
1
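The M-CLIP forward pass above does attention-mask-weighted mean pooling over the token embeddings before the linear projection. The pooling step in isolation (the shapes are made up for illustration):

import torch

batch, seq_len, dim = 2, 5, 8
embs = torch.randn(batch, seq_len, dim)                   # token embeddings
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])   # attention mask

# Zero out padded positions, then average over the real tokens only.
pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8])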
"""simple docstring""" import torch from torch import nn class _snake_case ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , _A : Optional[int] , _A : List[Any] , _A : Any , _A : List[str] , _A : int=1 , _A : Union[str, Any]=False): """simple docstring""" super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = n_token _SCREAMING_SNAKE_CASE : int = d_embed _SCREAMING_SNAKE_CASE : List[str] = d_proj _SCREAMING_SNAKE_CASE : Tuple = cutoffs + [n_token] _SCREAMING_SNAKE_CASE : Any = [0] + self.cutoffs _SCREAMING_SNAKE_CASE : Optional[int] = div_val _SCREAMING_SNAKE_CASE : Any = self.cutoffs[0] _SCREAMING_SNAKE_CASE : Dict = len(self.cutoffs) - 1 _SCREAMING_SNAKE_CASE : Union[str, Any] = self.shortlist_size + self.n_clusters if self.n_clusters > 0: _SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed)) _SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.zeros(self.n_clusters)) _SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList() _SCREAMING_SNAKE_CASE : int = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A))) else: self.out_projs.append(_A) self.out_layers.append(nn.Linear(_A , _A)) else: for i in range(len(self.cutoffs)): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1] _SCREAMING_SNAKE_CASE : Dict = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A))) self.out_layers.append(nn.Linear(_A , r_idx - l_idx)) _SCREAMING_SNAKE_CASE : Optional[int] = keep_order def _lowerCAmelCase ( self : Optional[Any] , _A : List[str] , _A : Optional[Any] , _A : Tuple , _A : int): """simple docstring""" if proj is None: _SCREAMING_SNAKE_CASE : List[str] = nn.functional.linear(_A , _A , bias=_A) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: _SCREAMING_SNAKE_CASE : List[str] = nn.functional.linear(_A , proj.t().contiguous()) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.linear(_A , _A , bias=_A) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def _lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] , _A : Union[str, Any]=None , _A : Optional[Any]=False): """simple docstring""" if labels is not None: # Shift so that tokens < n predict n _SCREAMING_SNAKE_CASE : Dict = hidden[..., :-1, :].contiguous() _SCREAMING_SNAKE_CASE : List[Any] = labels[..., 1:].contiguous() _SCREAMING_SNAKE_CASE : Optional[Any] = hidden.view(-1 , hidden.size(-1)) _SCREAMING_SNAKE_CASE : List[str] = labels.view(-1) if hidden.size(0) != labels.size(0): raise RuntimeError("""Input and labels should have the same size in the batch dimension.""") else: _SCREAMING_SNAKE_CASE : List[Any] = hidden.view(-1 , hidden.size(-1)) if self.n_clusters == 0: _SCREAMING_SNAKE_CASE : str = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0]) if labels is not None: _SCREAMING_SNAKE_CASE : List[str] = labels != -1_0_0 _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device) _SCREAMING_SNAKE_CASE : Tuple = ( -nn.functional.log_softmax(_A , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1) ) else: _SCREAMING_SNAKE_CASE : Any = nn.functional.log_softmax(_A , dim=-1) else: # construct weights and biases _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] _SCREAMING_SNAKE_CASE : Optional[int] = self.out_layers[0].weight[l_idx:r_idx] _SCREAMING_SNAKE_CASE : Tuple = self.out_layers[0].bias[l_idx:r_idx] else: _SCREAMING_SNAKE_CASE : Tuple = self.out_layers[i].weight _SCREAMING_SNAKE_CASE : Optional[int] = self.out_layers[i].bias if i == 0: _SCREAMING_SNAKE_CASE : int = torch.cat([weight_i, self.cluster_weight] , dim=0) _SCREAMING_SNAKE_CASE : Tuple = torch.cat([bias_i, self.cluster_bias] , dim=0) weights.append(_A) biases.append(_A) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = weights[0], biases[0], self.out_projs[0] _SCREAMING_SNAKE_CASE : List[str] = self._compute_logit(_A , _A , _A , _A) _SCREAMING_SNAKE_CASE : List[str] = nn.functional.log_softmax(_A , dim=1) if labels is None: _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden.new_empty((head_logit.size(0), self.n_token)) else: _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device) _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [0] + self.cutoffs for i in range(len(_A) - 1): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = cutoff_values[i], cutoff_values[i + 1] if labels is not None: _SCREAMING_SNAKE_CASE : Dict = (labels >= l_idx) & (labels < r_idx) _SCREAMING_SNAKE_CASE : Union[str, Any] = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue _SCREAMING_SNAKE_CASE : List[str] = labels.index_select(0 , _A) - l_idx _SCREAMING_SNAKE_CASE : List[str] = head_logprob.index_select(0 , _A) _SCREAMING_SNAKE_CASE : int = hidden.index_select(0 , _A) else: _SCREAMING_SNAKE_CASE : List[Any] = hidden if i == 0: if labels is not None: _SCREAMING_SNAKE_CASE : List[Any] = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1) else: _SCREAMING_SNAKE_CASE : Optional[int] = head_logprob[:, : self.cutoffs[0]] else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = weights[i], biases[i], self.out_projs[i] _SCREAMING_SNAKE_CASE : int = self._compute_logit(_A , _A , _A , _A) _SCREAMING_SNAKE_CASE : Dict = nn.functional.log_softmax(_A , dim=1) _SCREAMING_SNAKE_CASE : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: _SCREAMING_SNAKE_CASE : Any = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None]).squeeze(1) else: _SCREAMING_SNAKE_CASE : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i _SCREAMING_SNAKE_CASE : Union[str, Any] = logprob_i if labels is not None: if (hasattr(self , """keep_order""") and self.keep_order) or keep_order: out.index_copy_(0 , _A , -logprob_i) else: out[offset : offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return out def _lowerCAmelCase ( self : Dict , _A : Any): """simple docstring""" if self.n_clusters == 0: _SCREAMING_SNAKE_CASE : Union[str, Any] = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0]) return nn.functional.log_softmax(_A , dim=-1) else: # construct weights and biases _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.cutoff_ends[i], self.cutoff_ends[i + 1] _SCREAMING_SNAKE_CASE : List[Any] = self.out_layers[0].weight[l_idx:r_idx] _SCREAMING_SNAKE_CASE : str = self.out_layers[0].bias[l_idx:r_idx] 
else: _SCREAMING_SNAKE_CASE : Optional[int] = self.out_layers[i].weight _SCREAMING_SNAKE_CASE : Any = self.out_layers[i].bias if i == 0: _SCREAMING_SNAKE_CASE : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0) _SCREAMING_SNAKE_CASE : Tuple = torch.cat([bias_i, self.cluster_bias] , dim=0) weights.append(_A) biases.append(_A) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = weights[0], biases[0], self.out_projs[0] _SCREAMING_SNAKE_CASE : Optional[Any] = self._compute_logit(_A , _A , _A , _A) _SCREAMING_SNAKE_CASE : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token)) _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.functional.log_softmax(_A , dim=1) _SCREAMING_SNAKE_CASE : str = [0] + self.cutoffs for i in range(len(_A) - 1): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = cutoff_values[i], cutoff_values[i + 1] if i == 0: _SCREAMING_SNAKE_CASE : List[Any] = head_logprob[:, : self.cutoffs[0]] else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = weights[i], biases[i], self.out_projs[i] _SCREAMING_SNAKE_CASE : Any = self._compute_logit(_A , _A , _A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.log_softmax(_A , dim=1) _SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i _SCREAMING_SNAKE_CASE : Dict = logprob_i return out
635
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
1
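A quick sanity check for the Chudnovsky implementation above: with precision=50 only ceil(50/14) = 4 series terms are needed, and the printed value matches the standard leading digits of pi (a sketch, assuming the pi() helper above is in scope):

print(pi(50))
# 3.141592653589793238462643383279502884197169399375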
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : Optional[Any] = [] for line in lines: _SCREAMING_SNAKE_CASE : int = re.sub(R"""#.*""" , """""" , __SCREAMING_SNAKE_CASE ) # remove comments if line: filtered_lines.append(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[Any] = """\n""".join(__SCREAMING_SNAKE_CASE ) # Make a hash from all this code _SCREAMING_SNAKE_CASE : List[Any] = full_str.encode("""utf-8""" ) return shaaaa(__SCREAMING_SNAKE_CASE ).hexdigest() # get importable module names and hash for caching lowerCAmelCase_ = { '''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), '''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), '''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), '''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), '''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), '''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), '''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), '''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions lowerCAmelCase_ = { '''.csv''': ('''csv''', {}), '''.tsv''': ('''csv''', {'''sep''': '''\t'''}), '''.json''': ('''json''', {}), '''.jsonl''': ('''json''', {}), '''.parquet''': ('''parquet''', {}), '''.arrow''': ('''arrow''', {}), '''.txt''': ('''text''', {}), } _EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) lowerCAmelCase_ = {'''imagefolder''', '''audiofolder'''} # Used to filter data files based on extensions given a module name lowerCAmelCase_ = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''') _MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
635
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
1
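The _hash_python_lines helper above strips comments before hashing, so comment-only edits to a builder module do not invalidate the dataset cache. The same idea in miniature:

import re
from hashlib import sha256

lines = ["x = 1  # set x", "# comment-only line", "y = x + 1"]
filtered = [re.sub(r"#.*", "", line) for line in lines]
filtered = [line for line in filtered if line]  # drop lines emptied by the sub
print(sha256("\n".join(filtered).encode("utf-8")).hexdigest()[:12])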
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = BarthezTokenizer a = BarthezTokenizerFast a = True a = True def _lowerCAmelCase ( self : int): """simple docstring""" super().setUp() _SCREAMING_SNAKE_CASE : int = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""") tokenizer.save_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_A) _SCREAMING_SNAKE_CASE : List[str] = tokenizer def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = """<pad>""" _SCREAMING_SNAKE_CASE : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A) , _A) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A) , _A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(_A) , 1_0_1_1_2_2) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2) @require_torch def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _SCREAMING_SNAKE_CASE : Any = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer( _A , max_length=len(_A) , padding=_A , truncation=_A , return_tensors="""pt""") self.assertIsInstance(_A , _A) self.assertEqual((2, 6) , batch.input_ids.shape) self.assertEqual((2, 6) , batch.attention_mask.shape) _SCREAMING_SNAKE_CASE : int = batch.input_ids.tolist()[0] self.assertListEqual(_A , _A) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" if not self.test_rust_tokenizer: return _SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() _SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE : Tuple = """I was born in 92000, and this is falsé.""" _SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A) _SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A) self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A) self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(_A) _SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.encode(_A) self.assertListEqual(_A , _A) @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 
6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _SCREAMING_SNAKE_CASE : Any = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=_A , )
635
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
1
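For a Hermitian matrix, the Rayleigh quotient of an eigenvector recovers the corresponding eigenvalue, which gives a cheap check of the helpers above (assumes is_hermitian and rayleigh_quotient are in scope):

import numpy as np

a = np.array([[2.0, 0.0], [0.0, 5.0]])  # real symmetric, hence Hermitian
v = np.array([[0.0], [1.0]])            # eigenvector for eigenvalue 5
assert is_hermitian(a)
print(rayleigh_quotient(a, v))          # [[5.]]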
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : List[str] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=__SCREAMING_SNAKE_CASE ) env_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) launch_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) tpu_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) test_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) # Let's go _SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() if not hasattr(__SCREAMING_SNAKE_CASE , """func""" ): parser.print_help() exit(1 ) # Run args.func(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
635
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
1
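A worked number for the mass action law n * p = n_i**2 behind the carrier-concentration helper above, using silicon-like magnitudes (illustrative values; assumes carrier_concentration is in scope):

n_i = 1.5e10  # intrinsic carrier concentration, cm^-3
n = 1.0e16    # electron concentration, cm^-3
p = n_i**2 / n
print(p)  # 22500.0
assert carrier_concentration(n, 0, n_i) == ("hole_conc", p)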
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
635
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
635
1
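The cross-validation script above relies on sklearn's StratifiedKFold to produce label-balanced train/validation index splits; the splitting step in isolation:

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1])
kfold = StratifiedKFold(n_splits=3)
for i, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    print(i, train_idxs, valid_idxs)  # every fold keeps the 0/1 class ratio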
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not head: return True # split the list to two parts _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = head.next, head while fast and fast.next: _SCREAMING_SNAKE_CASE : int = fast.next.next _SCREAMING_SNAKE_CASE : Tuple = slow.next _SCREAMING_SNAKE_CASE : List[str] = slow.next _SCREAMING_SNAKE_CASE : Optional[Any] = None # Don't forget here! But forget still works! # reverse the second part _SCREAMING_SNAKE_CASE : Any = None while second: _SCREAMING_SNAKE_CASE : Dict = second.next _SCREAMING_SNAKE_CASE : List[Any] = node _SCREAMING_SNAKE_CASE : List[Any] = second _SCREAMING_SNAKE_CASE : List[Any] = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False _SCREAMING_SNAKE_CASE : Optional[Any] = node.next _SCREAMING_SNAKE_CASE : Tuple = head.next return True def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: if not head or not head.next: return True # 1. Get the midpoint (slow) _SCREAMING_SNAKE_CASE : List[Any] = head while fast and fast.next: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = fast.next.next, slow.next # 2. Push the second half into the stack _SCREAMING_SNAKE_CASE : List[Any] = [slow.val] while slow.next: _SCREAMING_SNAKE_CASE : int = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False _SCREAMING_SNAKE_CASE : Optional[Any] = cur.next return True def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not head or not head.next: return True _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Dict = 0 while head: if head.val in d: d[head.val].append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Tuple = [pos] _SCREAMING_SNAKE_CASE : Tuple = head.next pos += 1 _SCREAMING_SNAKE_CASE : Optional[Any] = pos - 1 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for v in d.values(): if len(__SCREAMING_SNAKE_CASE ) % 2 != 0: middle += 1 else: _SCREAMING_SNAKE_CASE : Tuple = 0 for i in range(0 , len(__SCREAMING_SNAKE_CASE ) ): if v[i] + v[len(__SCREAMING_SNAKE_CASE ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
635
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
1
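The linked-list palindrome checkers above read `.val` and `.next` attributes, but the node class itself is not part of the snippet. Below is a minimal sketch of the assumed node type plus a simple value-collecting palindrome check for comparison; the class and function names are illustrative, not from the source.

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def is_palindrome(head):
    # Collect values, then compare against the reversal; O(n) time, O(n) space.
    vals = []
    node = head
    while node:
        vals.append(node.val)
        node = node.next
    return vals == vals[::-1]


# Build 1 -> 2 -> 2 -> 1 and check it.
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(2)
head.next.next.next = ListNode(1)
print(is_palindrome(head))  # True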
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_upernet''': ['''UperNetConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UperNetForSemanticSegmentation''', '''UperNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
1
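The nearest-neighbour resizer above maps each destination pixel back to a source pixel by scaling its indices with the src/dst ratios and truncating. Here is a small NumPy-only sketch of the same index mapping; the array sizes are invented for the example.

import numpy as np

src = np.arange(16, dtype=np.uint8).reshape(4, 4)  # toy 4x4 "image"
dst_h, dst_w = 2, 8
ratio_y, ratio_x = src.shape[0] / dst_h, src.shape[1] / dst_w

dst = np.empty((dst_h, dst_w), dtype=src.dtype)
for i in range(dst_h):
    for j in range(dst_w):
        # Same truncating int(ratio * index) lookup as the class above.
        dst[i, j] = src[int(ratio_y * i), int(ratio_x * j)]
print(dst)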
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCAmelCase_ = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
1
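The test-overwriting script above expects each line of the correct-tests file to carry four semicolon-separated fields and matches failures by a `::`-joined id. A tiny sketch of that record format and the id it produces; the file, class, and test names are illustrative, not from the source.

# One record per line: file;class;test;correct_line
record = "tests/models/bert/test_modeling_bert.py;BertModelTest;test_forward;        self.assertEqual(out.shape, (1, 8))"
file, class_name, test_name, correct_line = record.split(";")

# Failure ids are matched as file::class::test, as in the script above.
failure_id = "::".join([file, class_name, test_name])
print(failure_id)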
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
635
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
            _SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2
            _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )

        # note: the source condition `if "audio" and "qkv" in key:` tested only `"qkv" in key`,
        # since the literal "audio" is always truthy; the intended check is written out here
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            _SCREAMING_SNAKE_CASE : Dict = value
            _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3
            _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim]
            _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2]
            _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :]

            _SCREAMING_SNAKE_CASE : Dict = query_layer
            _SCREAMING_SNAKE_CASE : List[Any] = key_layer
            _SCREAMING_SNAKE_CASE : Dict = value_layer
        else:
            _SCREAMING_SNAKE_CASE : Optional[Any] = value

    return model_state_dict


def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]:
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )

    clap_model.eval()
    _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict()
    _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE )

    _SCREAMING_SNAKE_CASE : int = ClapConfig()
    _SCREAMING_SNAKE_CASE : Tuple = enable_fusion
    _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE )

    # ignore the spectrogram embedding layer
    model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )

    model.save_pretrained(__SCREAMING_SNAKE_CASE )
    transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    lowerCAmelCase_ = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
635
1
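The CLAP conversion above splits a fused qkv weight into three equal slices along dimension 0. Below is a standalone sketch of that split; the 12-row tensor size is invented for the example.

import torch

mixed_qkv = torch.randn(12, 4)    # fused (3 * head_dim, in_features) weight
qkv_dim = mixed_qkv.size(0) // 3  # one third per projection

query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (4, 4)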
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { '''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GraphormerForGraphClassification''', '''GraphormerModel''', '''GraphormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
1
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _snake_case ( __snake_case ): """simple docstring""" def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(_A , """hidden_sizes""")) self.parent.assertTrue(hasattr(_A , """num_attention_heads""")) class _snake_case : """simple docstring""" def __init__( self : str , _A : Optional[int] , _A : Dict=1_3 , _A : Dict=6_4 , _A : Any=3 , _A : Optional[Any]=3 , _A : str=2 , _A : List[str]=1 , _A : Dict=1_6 , _A : List[str]=[1_2_8, 2_5_6, 3_8_4] , _A : Dict=[4, 6, 8] , _A : List[Any]=[2, 3, 4] , _A : int=[1_6, 1_6, 1_6] , _A : Union[str, Any]=0 , _A : Optional[Any]=[2, 2, 2] , _A : Dict=[2, 2, 2] , _A : Any=0.02 , _A : List[str]=True , _A : int=True , _A : Optional[Any]=2 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = parent _SCREAMING_SNAKE_CASE : Dict = batch_size _SCREAMING_SNAKE_CASE : Union[str, Any] = image_size _SCREAMING_SNAKE_CASE : Any = num_channels _SCREAMING_SNAKE_CASE : int = kernel_size _SCREAMING_SNAKE_CASE : Union[str, Any] = stride _SCREAMING_SNAKE_CASE : Optional[Any] = padding _SCREAMING_SNAKE_CASE : List[Any] = hidden_sizes _SCREAMING_SNAKE_CASE : int = num_attention_heads _SCREAMING_SNAKE_CASE : str = depths _SCREAMING_SNAKE_CASE : Dict = key_dim _SCREAMING_SNAKE_CASE : int = drop_path_rate _SCREAMING_SNAKE_CASE : Tuple = patch_size _SCREAMING_SNAKE_CASE : Optional[int] = attention_ratio _SCREAMING_SNAKE_CASE : List[Any] = mlp_ratio _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : Dict = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] _SCREAMING_SNAKE_CASE : Dict = is_training _SCREAMING_SNAKE_CASE : Tuple = use_labels _SCREAMING_SNAKE_CASE : Optional[int] = num_labels _SCREAMING_SNAKE_CASE : Optional[int] = initializer_range def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_labels: _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels) _SCREAMING_SNAKE_CASE : Tuple = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : Tuple): """simple docstring""" return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , 
hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = LevitModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : List[str] = model(_A) _SCREAMING_SNAKE_CASE : Dict = (self.image_size, self.image_size) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = image_size[0], image_size[1] for _ in range(4): _SCREAMING_SNAKE_CASE : str = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) _SCREAMING_SNAKE_CASE : Optional[Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , ) def _lowerCAmelCase ( self : str , _A : Any , _A : Optional[int] , _A : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.num_labels _SCREAMING_SNAKE_CASE : int = LevitForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : str = model(_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = config_and_inputs _SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) a = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) a = False a = False a = False a = False a = False def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = LevitModelTester(self) _SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7) def _lowerCAmelCase ( self : Any): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCAmelCase ( self : str): """simple docstring""" return @unittest.skip(reason="""Levit does not use inputs_embeds""") def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason="""Levit does not support input and output embeddings""") def _lowerCAmelCase ( self : Dict): """simple docstring""" pass @unittest.skip(reason="""Levit does not output attentions""") def _lowerCAmelCase ( self : int): """simple docstring""" pass def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Any = model_class(_A) _SCREAMING_SNAKE_CASE : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : Dict = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" def check_hidden_states_output(_A : Optional[Any] , _A : int , _A : List[str]): _SCREAMING_SNAKE_CASE : Dict = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.hidden_states _SCREAMING_SNAKE_CASE : Dict = len(self.model_tester.depths) + 1 self.assertEqual(len(_A) , _A) _SCREAMING_SNAKE_CASE : int = (self.model_tester.image_size, self.model_tester.image_size) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = image_size[0], image_size[1] for _ in range(4): _SCREAMING_SNAKE_CASE : str = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) _SCREAMING_SNAKE_CASE : Optional[Any] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : int = True check_hidden_states_output(_A , _A , _A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : str = True check_hidden_states_output(_A , _A , _A) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") def _lowerCAmelCase ( self : List[str]): """simple docstring""" pass def _lowerCAmelCase ( self : Union[str, Any] , _A : int , _A : Optional[Any] , _A : Optional[Any]=False): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = super()._prepare_for_class(_A , _A , return_labels=_A) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A) def _lowerCAmelCase ( self : int): """simple docstring""" if not self.model_tester.is_training: return _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Union[str, Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue _SCREAMING_SNAKE_CASE : str = model_class(_A) model.to(_A) model.train() _SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_A , _A , 
return_labels=_A) _SCREAMING_SNAKE_CASE : List[Any] = model(**_A).loss loss.backward() def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : Union[str, Any] = True for model_class in self.all_model_classes: if model_class in get_values(_A) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_A) model.gradient_checkpointing_enable() model.to(_A) model.train() _SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_A , _A , return_labels=_A) _SCREAMING_SNAKE_CASE : Optional[int] = model(**_A).loss loss.backward() def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Optional[Any] = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}"""): _SCREAMING_SNAKE_CASE : Any = problem_type["""title"""] _SCREAMING_SNAKE_CASE : Optional[int] = problem_type["""num_labels"""] _SCREAMING_SNAKE_CASE : Any = model_class(_A) model.to(_A) model.train() _SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(_A , _A , return_labels=_A) if problem_type["num_labels"] > 1: _SCREAMING_SNAKE_CASE : List[str] = inputs["""labels"""].unsqueeze(1).repeat(1 , problem_type["""num_labels"""]) _SCREAMING_SNAKE_CASE : List[Any] = inputs["""labels"""].to(problem_type["""dtype"""]) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A) as warning_list: _SCREAMING_SNAKE_CASE : Dict = model(**_A).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""") loss.backward() @slow def _lowerCAmelCase ( self : int): """simple docstring""" for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : Optional[Any] = LevitModel.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCamelCase_()-> List[str]: _SCREAMING_SNAKE_CASE : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : Tuple): """simple docstring""" return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( _A) _SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor _SCREAMING_SNAKE_CASE : List[Any] = prepare_img() _SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = model(**_A) # verify the logits _SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1.0_448, -0.3_745, -1.8_317]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4))
635
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
1
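The retrieval branch of the RAG evaluation above scores precision@k as the overlap between the top-k hypothesis provenance titles and the reference titles, divided by k. A short sketch with toy tab-separated strings; the titles are made up for illustration.

k = 2
hypo = "Paris\tLyon\tNice"       # ranked retrieved titles
reference = "Paris\tMarseille"   # gold provenance titles

hypo_topk = set(hypo.split("\t")[:k])
ref_set = set(reference.split("\t"))
precision_at_k = len(hypo_topk & ref_set) / k
print(precision_at_k)  # 0.5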
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def _lowerCAmelCase ( self : List[Any] , _A : Dict=0): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(_A)) _SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(_A) _SCREAMING_SNAKE_CASE : Any = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""") pipe.set_progress_bar_config(disable=_A) _SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs() _SCREAMING_SNAKE_CASE : List[str] = pipe(**_A).images _SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_2_8, 1_2_8, 3) _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""") _SCREAMING_SNAKE_CASE : str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_A) pipe.set_progress_bar_config(disable=_A) _SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs() _SCREAMING_SNAKE_CASE : Optional[int] = pipe(**_A).images _SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""") _SCREAMING_SNAKE_CASE : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=_A) # warmup pass to apply optimizations _SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs()) _SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs() _SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**_A).images _SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _SCREAMING_SNAKE_CASE : List[str] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _lowerCAmelCase ( self : int): """simple 
docstring""" _SCREAMING_SNAKE_CASE : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""") _SCREAMING_SNAKE_CASE : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=_A) _SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs() _SCREAMING_SNAKE_CASE : Optional[int] = pipe(**_A).images _SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""") _SCREAMING_SNAKE_CASE : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs() _SCREAMING_SNAKE_CASE : Any = pipe(**_A).images _SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _SCREAMING_SNAKE_CASE : int = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""") _SCREAMING_SNAKE_CASE : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=_A) _SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs() _SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**_A).images _SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _snake_case ( unittest.TestCase ): """simple docstring""" @property def _lowerCAmelCase ( self : int): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = ort.SessionOptions() _SCREAMING_SNAKE_CASE : Tuple = False return options def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""") _SCREAMING_SNAKE_CASE : Any = init_image.resize((7_6_8, 5_1_2)) # using the PNDM scheduler by default _SCREAMING_SNAKE_CASE : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = """A fantasy landscape, trending on artstation""" _SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0) _SCREAMING_SNAKE_CASE : Optional[Any] = pipe( prompt=_A , image=_A 
, strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_A , output_type="""np""" , ) _SCREAMING_SNAKE_CASE : str = output.images _SCREAMING_SNAKE_CASE : Dict = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) _SCREAMING_SNAKE_CASE : Any = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""") _SCREAMING_SNAKE_CASE : Dict = init_image.resize((7_6_8, 5_1_2)) _SCREAMING_SNAKE_CASE : Optional[int] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""") _SCREAMING_SNAKE_CASE : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_A , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_A) _SCREAMING_SNAKE_CASE : Any = """A fantasy landscape, trending on artstation""" _SCREAMING_SNAKE_CASE : int = np.random.RandomState(0) _SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=_A , image=_A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_A , output_type="""np""" , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = output.images _SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) _SCREAMING_SNAKE_CASE : Tuple = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
635
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
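# Hypothetical command line for the IGF script above (the filename is an
# assumption; the data files named are the defaults hard-coded in main(),
# which currently ignores most parsed arguments in favor of those literals):
#
#   python run_igf.py \
#       --data_file data/tokenized_stories_train_wikitext103.jbl \
#       --igf_data_file igf_context_pairs.jbl \
#       --context_len 32 --max_steps 1000 --batch_size 16 \
#       --finetuned_model_name gpt2_finetuned.pt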
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : str = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", """decoder.output_projection.weight""", ] for k in ignore_keys: state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = emb.weight.shape _SCREAMING_SNAKE_CASE : str = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = emb.weight.data return lin_layer def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="facebook/mbart-large-en-ro" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""] remove_ignore_keys_(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = state_dict["""encoder.embed_tokens.weight"""].shape[0] _SCREAMING_SNAKE_CASE : str = MBartConfig.from_pretrained(__SCREAMING_SNAKE_CASE , vocab_size=__SCREAMING_SNAKE_CASE ) if mbart_aa and finetuned: _SCREAMING_SNAKE_CASE : List[str] = """relu""" _SCREAMING_SNAKE_CASE : Any = state_dict["""decoder.embed_tokens.weight"""] _SCREAMING_SNAKE_CASE : Dict = MBartForConditionalGeneration(__SCREAMING_SNAKE_CASE ) model.model.load_state_dict(__SCREAMING_SNAKE_CASE ) if finetuned: _SCREAMING_SNAKE_CASE : Optional[Any] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class _snake_case ( __snake_case ): """simple docstring""" a = ["input_features", "attention_mask"] def __init__( self : List[Any] , _A : List[str]=8_0 , _A : Tuple=1_6_0_0_0 , _A : Union[str, Any]=8_0 , _A : Union[str, Any]=0.0 , _A : Optional[int]=True , _A : Optional[int]=True , _A : List[Any]=True , **_A : Dict , ): """simple docstring""" super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A) _SCREAMING_SNAKE_CASE : List[str] = num_mel_bins _SCREAMING_SNAKE_CASE : Any = do_ceptral_normalize _SCREAMING_SNAKE_CASE : List[str] = normalize_means _SCREAMING_SNAKE_CASE : Any = normalize_vars _SCREAMING_SNAKE_CASE : Any = True def _lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers _SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(_A).unsqueeze(0) _SCREAMING_SNAKE_CASE : List[Any] = ta_kaldi.fbank(_A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate) return features.numpy() @staticmethod def _lowerCAmelCase ( _A : np.ndarray , _A : int , _A : Optional[bool] = True , _A : Optional[bool] = True , _A : float = 0.0 , ): """simple docstring""" if normalize_means: _SCREAMING_SNAKE_CASE : Tuple = x[:input_length].mean(axis=0) _SCREAMING_SNAKE_CASE : Any = np.subtract(_A , _A) if normalize_vars: _SCREAMING_SNAKE_CASE : List[str] = x[:input_length].std(axis=0) _SCREAMING_SNAKE_CASE : List[str] = np.divide(_A , _A) if input_length < x.shape[0]: _SCREAMING_SNAKE_CASE : Tuple = padding_value # make sure array is in float32 _SCREAMING_SNAKE_CASE : List[str] = x.astype(np.floataa) return x def _lowerCAmelCase ( self : List[str] , _A : List[np.ndarray] , _A : Optional[np.ndarray] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_A , _A , self.normalize_means , self.normalize_vars , self.padding_value) for x, n in zip(_A , _A) ] def __call__( self : Tuple , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : Union[str, Any] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""") else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""") _SCREAMING_SNAKE_CASE : Tuple = isinstance(_A , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""") _SCREAMING_SNAKE_CASE : Any = is_batched_numpy or ( isinstance(_A , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: _SCREAMING_SNAKE_CASE : int = [np.asarray(_A , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray): _SCREAMING_SNAKE_CASE : Tuple = np.asarray(_A , dtype=np.floataa) elif isinstance(_A , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): _SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa) # always return batch if not is_batched: _SCREAMING_SNAKE_CASE : List[Any] = [raw_speech] # extract fbank features _SCREAMING_SNAKE_CASE : List[str] = [self._extract_fbank_features(_A) for waveform in raw_speech] # convert into correct format for padding _SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({"""input_features""": features}) _SCREAMING_SNAKE_CASE : List[Any] = self.pad( _A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , ) # make sure list is in array format _SCREAMING_SNAKE_CASE : str = padded_inputs.get("""input_features""") if isinstance(input_features[0] , _A): _SCREAMING_SNAKE_CASE : Tuple = [np.asarray(_A , dtype=np.floataa) for feature in input_features] _SCREAMING_SNAKE_CASE : Optional[int] = padded_inputs.get("""attention_mask""") if attention_mask is not None: _SCREAMING_SNAKE_CASE : List[str] = [np.asarray(_A , dtype=np.intaa) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: _SCREAMING_SNAKE_CASE : Dict = ( np.array(_A , dtype=np.intaa) if self._get_padding_strategies(_A , max_length=_A) is not PaddingStrategy.DO_NOT_PAD else None ) _SCREAMING_SNAKE_CASE : Tuple = self.normalize( padded_inputs["""input_features"""] , attention_mask=_A) if return_tensors is not None: _SCREAMING_SNAKE_CASE : List[Any] = padded_inputs.convert_to_tensors(_A) return padded_inputs
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
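# Example invocation of the converter above (the script filename and output
# folder are placeholders; --pytorch_dump_folder is required):
#
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu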
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCAmelCase_ = NewType('''DataClass''', Any) lowerCAmelCase_ = NewType('''DataClassType''', Any) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Any: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Callable[[str], Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = {str(__SCREAMING_SNAKE_CASE ): choice for choice in choices} return lambda __SCREAMING_SNAKE_CASE : str_to_choice.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(*, __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , )-> dataclasses.Field: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _SCREAMING_SNAKE_CASE : Optional[int] = {} if aliases is not None: _SCREAMING_SNAKE_CASE : Dict = aliases if help is not None: _SCREAMING_SNAKE_CASE : Optional[int] = help return dataclasses.field(metadata=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , default_factory=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class _snake_case ( __snake_case ): """simple docstring""" a = 42 def __init__( self : Union[str, Any] , _A : Union[DataClassType, Iterable[DataClassType]] , **_A : str): """simple docstring""" if "formatter_class" not in kwargs: _SCREAMING_SNAKE_CASE : Optional[int] = ArgumentDefaultsHelpFormatter super().__init__(**_A) if dataclasses.is_dataclass(_A): _SCREAMING_SNAKE_CASE : List[str] = [dataclass_types] _SCREAMING_SNAKE_CASE : Optional[Any] = list(_A) for dtype in self.dataclass_types: self._add_dataclass_arguments(_A) @staticmethod def _lowerCAmelCase ( _A : ArgumentParser , _A : dataclasses.Field): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = f"""--{field.name}""" _SCREAMING_SNAKE_CASE : int = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , _A): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""") _SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""aliases""" , []) if isinstance(_A , _A): _SCREAMING_SNAKE_CASE : int = [aliases] _SCREAMING_SNAKE_CASE : Tuple = getattr(field.type , """__origin__""" , field.type) if origin_type is Union or (hasattr(_A , """UnionType""") and isinstance(_A , types.UnionType)): if str not in field.type.__args__ and ( len(field.type.__args__) != 2 or type(_A) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" f""" Problem encountered in field '{field.name}'.""") if type(_A) not in field.type.__args__: # filter `str` in Union _SCREAMING_SNAKE_CASE : Union[str, Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _SCREAMING_SNAKE_CASE : Optional[Any] = getattr(field.type , """__origin__""" , field.type) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _SCREAMING_SNAKE_CASE : int = ( field.type.__args__[0] if isinstance(_A , field.type.__args__[1]) else field.type.__args__[1] ) _SCREAMING_SNAKE_CASE : Optional[Any] = getattr(field.type , """__origin__""" , field.type) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _SCREAMING_SNAKE_CASE : List[Any] = {} if origin_type is Literal or (isinstance(field.type , _A) and issubclass(field.type , _A)): if origin_type is Literal: _SCREAMING_SNAKE_CASE : Optional[Any] = field.type.__args__ else: _SCREAMING_SNAKE_CASE : int = [x.value for x in field.type] _SCREAMING_SNAKE_CASE : List[str] = make_choice_type_function(kwargs["""choices"""]) if field.default is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Any = field.default else: _SCREAMING_SNAKE_CASE : Union[str, Any] = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _SCREAMING_SNAKE_CASE : int = copy(_A) # Hack because type=bool in argparse does not behave as we want. _SCREAMING_SNAKE_CASE : Optional[Any] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
_SCREAMING_SNAKE_CASE : Dict = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _SCREAMING_SNAKE_CASE : str = default # This tells argparse we accept 0 or 1 value after --field_name _SCREAMING_SNAKE_CASE : str = """?""" # This is the value that will get picked if we do --field_name (without value) _SCREAMING_SNAKE_CASE : List[Any] = True elif isclass(_A) and issubclass(_A , _A): _SCREAMING_SNAKE_CASE : Tuple = field.type.__args__[0] _SCREAMING_SNAKE_CASE : Optional[int] = """+""" if field.default_factory is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Union[str, Any] = field.default_factory() elif field.default is dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Optional[Any] = True else: _SCREAMING_SNAKE_CASE : List[str] = field.type if field.default is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : int = field.default elif field.default_factory is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Union[str, Any] = field.default_factory() else: _SCREAMING_SNAKE_CASE : Any = True parser.add_argument(_A , *_A , **_A) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): _SCREAMING_SNAKE_CASE : Union[str, Any] = False parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **_A) def _lowerCAmelCase ( self : Optional[Any] , _A : DataClassType): """simple docstring""" if hasattr(_A , """_argument_group_name"""): _SCREAMING_SNAKE_CASE : List[str] = self.add_argument_group(dtype._argument_group_name) else: _SCREAMING_SNAKE_CASE : str = self try: _SCREAMING_SNAKE_CASE : Dict[str, type] = get_type_hints(_A) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""") except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(_A): _SCREAMING_SNAKE_CASE : List[str] = """.""".join(map(_A , sys.version_info[:3])) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""") from ex raise for field in dataclasses.fields(_A): if not field.init: continue _SCREAMING_SNAKE_CASE : Any = type_hints[field.name] self._parse_dataclass_field(_A , _A) def _lowerCAmelCase ( self : Any , _A : Union[str, Any]=None , _A : Union[str, Any]=False , _A : Union[str, Any]=True , _A : Union[str, Any]=None , _A : int=None , ): """simple docstring""" if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): _SCREAMING_SNAKE_CASE : List[str] = [] if args_filename: args_files.append(Path(_A)) elif look_for_args_file and len(sys.argv): args_files.append(Path(sys.argv[0]).with_suffix(""".args""")) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _SCREAMING_SNAKE_CASE : int = ArgumentParser() args_file_parser.add_argument(_A , type=_A , action="""append""") # Use only remaining args for further parsing (remove the args_file_flag) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = args_file_parser.parse_known_args(args=_A) _SCREAMING_SNAKE_CASE : List[str] = vars(_A).get(args_file_flag.lstrip("""-""") , _A) if cmd_args_file_paths: args_files.extend([Path(_A) for p in cmd_args_file_paths]) _SCREAMING_SNAKE_CASE : Optional[Any] = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _SCREAMING_SNAKE_CASE : str = file_args + args if args is not None else file_args + sys.argv[1:] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.parse_known_args(args=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for dtype in self.dataclass_types: _SCREAMING_SNAKE_CASE : Dict = {f.name for f in dataclasses.fields(_A) if f.init} _SCREAMING_SNAKE_CASE : List[str] = {k: v for k, v in vars(_A).items() if k in keys} for k in keys: delattr(_A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = dtype(**_A) outputs.append(_A) if len(namespace.__dict__) > 0: # additional namespace. 
outputs.append(_A) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""") return (*outputs,) def _lowerCAmelCase ( self : List[Any] , _A : Dict[str, Any] , _A : bool = False): """simple docstring""" _SCREAMING_SNAKE_CASE : str = set(args.keys()) _SCREAMING_SNAKE_CASE : Dict = [] for dtype in self.dataclass_types: _SCREAMING_SNAKE_CASE : Union[str, Any] = {f.name for f in dataclasses.fields(_A) if f.init} _SCREAMING_SNAKE_CASE : Optional[Any] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) _SCREAMING_SNAKE_CASE : Optional[int] = dtype(**_A) outputs.append(_A) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(_A)}""") return tuple(_A) def _lowerCAmelCase ( self : Optional[int] , _A : str , _A : bool = False): """simple docstring""" with open(Path(_A) , encoding="""utf-8""") as open_json_file: _SCREAMING_SNAKE_CASE : List[str] = json.loads(open_json_file.read()) _SCREAMING_SNAKE_CASE : List[Any] = self.parse_dict(_A , allow_extra_keys=_A) return tuple(_A) def _lowerCAmelCase ( self : Dict , _A : str , _A : bool = False): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.parse_dict(yaml.safe_load(Path(_A).read_text()) , allow_extra_keys=_A) return tuple(_A)
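# Self-contained sketch of the parser defined above, assuming it corresponds
# to transformers' HfArgumentParser: dataclass fields become CLI flags, and a
# bool field that defaults to True also gets the auto-generated `--no_*`
# complement described in the boolean-handling branch.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    do_eval: bool = True  # yields both `--do_eval` and `--no_do_eval`


(args,) = HfArgumentParser(TrainingArgs).parse_args_into_dataclasses(
    args=["--learning_rate", "1e-4", "--no_do_eval"]
)
assert args.learning_rate == 1e-4 and args.do_eval is False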
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
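# Quick numeric check of the stride-product property at the bottom of the
# config above (the public class name SEWConfig is assumed here): the default
# conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) downsamples raw
# audio by a factor of 5 * 2**6 == 320.
import functools
import operator

from transformers import SEWConfig

assert functools.reduce(operator.mul, SEWConfig().conv_stride, 1) == 320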
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1e-12 )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__SCREAMING_SNAKE_CASE , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T _SCREAMING_SNAKE_CASE : Any = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__SCREAMING_SNAKE_CASE , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T return jnp.matmul(__SCREAMING_SNAKE_CASE , norm_emb_a.T ) class _snake_case ( nn.Module ): """simple docstring""" a = 42 a = jnp.floataa def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config) _SCREAMING_SNAKE_CASE : Tuple = nn.Dense(self.config.projection_dim , use_bias=_A , dtype=self.dtype) _SCREAMING_SNAKE_CASE : Dict = self.param("""concept_embeds""" , jax.nn.initializers.ones , (1_7, self.config.projection_dim)) _SCREAMING_SNAKE_CASE : Any = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim)) _SCREAMING_SNAKE_CASE : Dict = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (1_7,)) _SCREAMING_SNAKE_CASE : int = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,)) def __call__( self : List[Any] , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.vision_model(_A)[1] _SCREAMING_SNAKE_CASE : Optional[int] = self.visual_projection(_A) _SCREAMING_SNAKE_CASE : str = jax_cosine_distance(_A , self.special_care_embeds) _SCREAMING_SNAKE_CASE : Any = jax_cosine_distance(_A , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs _SCREAMING_SNAKE_CASE : Dict = 0.0 _SCREAMING_SNAKE_CASE : Optional[int] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment _SCREAMING_SNAKE_CASE : Tuple = jnp.round(_A , 3) _SCREAMING_SNAKE_CASE : List[str] = jnp.any(special_scores > 0 , axis=1 , keepdims=_A) # Use a lower threshold if an image has any special care concept _SCREAMING_SNAKE_CASE : List[Any] = is_special_care * 0.01 _SCREAMING_SNAKE_CASE : Dict = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment _SCREAMING_SNAKE_CASE : int = jnp.round(_A , 3) _SCREAMING_SNAKE_CASE : Optional[int] = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class _snake_case ( __snake_case ): """simple docstring""" a = CLIPConfig a = "clip_input" a = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Optional[int] , _A : CLIPConfig , _A : Optional[Tuple] = None , _A : int = 0 , _A : jnp.dtype = jnp.floataa , _A : bool = True , **_A : Optional[int] , ): """simple docstring""" if input_shape is None: _SCREAMING_SNAKE_CASE : Optional[Any] = (1, 2_2_4, 2_2_4, 3) _SCREAMING_SNAKE_CASE : Any = self.module_class(config=_A , dtype=_A , **_A) super().__init__(_A , _A , input_shape=_A , seed=_A , dtype=_A , _do_init=_do_init) def _lowerCAmelCase ( self : Dict , _A : jax.random.KeyArray , _A : Tuple , _A : FrozenDict = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = jax.random.normal(_A , _A) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = 
jax.random.split(_A) _SCREAMING_SNAKE_CASE : Tuple = {"""params""": params_rng, """dropout""": dropout_rng} _SCREAMING_SNAKE_CASE : Optional[int] = self.module.init(_A , _A)["""params"""] return random_params def __call__( self : Tuple , _A : Union[str, Any] , _A : dict = None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = jnp.transpose(_A , (0, 2, 3, 1)) return self.module.apply( {"""params""": params or self.params} , jnp.array(_A , dtype=jnp.floataa) , rngs={} , )
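# Tiny standalone restatement of the cosine-distance helper at the top of
# this file (the names below are local to this sketch, not the module's):
# row-normalize both embedding matrices, then matmul one against the
# transpose of the other.
import jax.numpy as jnp


def cosine_scores(emb_1, emb_2, eps=1e-12):
    n1 = (emb_1.T / jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    n2 = (emb_2.T / jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(n1, n2.T)


print(cosine_scores(jnp.array([[1.0, 0.0], [0.0, 2.0]]), jnp.array([[1.0, 0.0]])))
# -> [[1.], [0.]]: parallel directions score 1, orthogonal directions 0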
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from functools import lru_cache def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> set: _SCREAMING_SNAKE_CASE : int = 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__SCREAMING_SNAKE_CASE ) if n > 1: factors.add(__SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: return len(unique_prime_factors(__SCREAMING_SNAKE_CASE ) ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return len(set(__SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> list: _SCREAMING_SNAKE_CASE : Tuple = 2 while True: # Increment each value of a generated range _SCREAMING_SNAKE_CASE : Any = [base + i for i in range(__SCREAMING_SNAKE_CASE )] # Run elements through out unique_prime_factors function # Append our target number to the end. _SCREAMING_SNAKE_CASE : List[str] = [upf_len(__SCREAMING_SNAKE_CASE ) for x in group] checker.append(__SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(__SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 4 )-> int: _SCREAMING_SNAKE_CASE : Tuple = run(__SCREAMING_SNAKE_CASE ) return results[0] if len(__SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
635
1
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
635
1
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
635
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
635
1
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
635
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _snake_case ( __snake_case ): """simple docstring""" a = "vivit" def __init__( self : str , _A : Union[str, Any]=2_2_4 , _A : int=3_2 , _A : Tuple=[2, 1_6, 1_6] , _A : Union[str, Any]=3 , _A : str=7_6_8 , _A : int=1_2 , _A : Optional[Any]=1_2 , _A : int=3_0_7_2 , _A : int="gelu_fast" , _A : List[str]=0.0 , _A : Union[str, Any]=0.0 , _A : Tuple=0.02 , _A : Optional[Any]=1e-06 , _A : Optional[int]=True , **_A : List[Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers _SCREAMING_SNAKE_CASE : Dict = num_attention_heads _SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size _SCREAMING_SNAKE_CASE : str = hidden_act _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Tuple = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps _SCREAMING_SNAKE_CASE : Any = image_size _SCREAMING_SNAKE_CASE : int = num_frames _SCREAMING_SNAKE_CASE : Dict = tubelet_size _SCREAMING_SNAKE_CASE : List[str] = num_channels _SCREAMING_SNAKE_CASE : Any = qkv_bias super().__init__(**_A)
635
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
635
1
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _snake_case ( ctypes.Structure ): """simple docstring""" a = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCamelCase_()-> Any: if os.name == "nt": _SCREAMING_SNAKE_CASE : List[str] = CursorInfo() _SCREAMING_SNAKE_CASE : Tuple = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def lowerCamelCase_()-> Tuple: if os.name == "nt": _SCREAMING_SNAKE_CASE : Union[str, Any] = CursorInfo() _SCREAMING_SNAKE_CASE : Any = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__SCREAMING_SNAKE_CASE , ctypes.byref(__SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def lowerCamelCase_()-> List[Any]: try: hide_cursor() yield finally: show_cursor()
635
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
1
"""simple docstring""" lowerCAmelCase_ = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
635
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
1
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _snake_case : """simple docstring""" @staticmethod def _lowerCAmelCase ( *_A : str , **_A : int): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" a = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def _lowerCAmelCase ( self : str , _A : List[Any] , _A : str , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""") _SCREAMING_SNAKE_CASE : Union[str, Any] = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def _lowerCAmelCase ( self : int , _A : Tuple , _A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = vqa_pipeline(_A , top_k=1) self.assertEqual( _A , [ [{"""score""": ANY(_A), """answer""": ANY(_A)}], [{"""score""": ANY(_A), """answer""": ANY(_A)}], ] , ) @require_torch def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""") _SCREAMING_SNAKE_CASE : str = """./tests/fixtures/tests_samples/COCO/000000039769.png""" _SCREAMING_SNAKE_CASE : List[Any] = """How many cats are there?""" _SCREAMING_SNAKE_CASE : Any = vqa_pipeline(image=_A , question="""How many cats are there?""" , top_k=2) self.assertEqual( _A , [{"""score""": ANY(_A), """answer""": ANY(_A)}, {"""score""": ANY(_A), """answer""": ANY(_A)}]) _SCREAMING_SNAKE_CASE : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2) self.assertEqual( _A , [{"""score""": ANY(_A), """answer""": ANY(_A)}, {"""score""": ANY(_A), """answer""": ANY(_A)}]) @slow @require_torch def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""") _SCREAMING_SNAKE_CASE : Union[str, Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png""" _SCREAMING_SNAKE_CASE : Tuple = """How many cats are there?""" _SCREAMING_SNAKE_CASE : Dict = vqa_pipeline(image=_A , question=_A , top_k=2) self.assertEqual( nested_simplify(_A , decimals=4) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]) _SCREAMING_SNAKE_CASE : Any = vqa_pipeline({"""image""": image, """question""": question} , top_k=2) self.assertEqual( nested_simplify(_A , decimals=4) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]) _SCREAMING_SNAKE_CASE : Tuple = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2) self.assertEqual( nested_simplify(_A , decimals=4) , [[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , ) @require_tf @unittest.skip("""Visual 
question answering not implemented in TF""") def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" pass
635
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase_ = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
1
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _snake_case ( __snake_case ): """simple docstring""" a = "megatron-bert" def __init__( self : Optional[int] , _A : Any=2_9_0_5_6 , _A : Union[str, Any]=1_0_2_4 , _A : Union[str, Any]=2_4 , _A : Optional[Any]=1_6 , _A : Union[str, Any]=4_0_9_6 , _A : List[Any]="gelu" , _A : Tuple=0.1 , _A : int=0.1 , _A : str=5_1_2 , _A : Dict=2 , _A : Any=0.02 , _A : List[str]=1e-12 , _A : List[str]=0 , _A : Dict="absolute" , _A : Tuple=True , **_A : Union[str, Any] , ): """simple docstring""" super().__init__(pad_token_id=_A , **_A) _SCREAMING_SNAKE_CASE : List[Any] = vocab_size _SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size _SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers _SCREAMING_SNAKE_CASE : Dict = num_attention_heads _SCREAMING_SNAKE_CASE : List[Any] = hidden_act _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : str = max_position_embeddings _SCREAMING_SNAKE_CASE : List[str] = type_vocab_size _SCREAMING_SNAKE_CASE : str = initializer_range _SCREAMING_SNAKE_CASE : int = layer_norm_eps _SCREAMING_SNAKE_CASE : int = position_embedding_type _SCREAMING_SNAKE_CASE : str = use_cache
635
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
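# How to run (added for illustration; the file name ``cross_validation.py`` is an
# assumption, mirroring the launch pattern of the accelerate examples the header
# above points to):
#
#   accelerate config                                       # one-time machine setup
#   accelerate launch cross_validation.py --num_folds 3
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16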
635
1
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar('''_T''') class _snake_case ( Generic[_T] ): """simple docstring""" def __init__( self : Union[str, Any] , _A : Iterable[_T] | None = None): """simple docstring""" _SCREAMING_SNAKE_CASE : list[_T] = list(iterable or []) _SCREAMING_SNAKE_CASE : list[_T] = [] def __len__( self : List[Any]): """simple docstring""" return len(self._stacka) + len(self._stacka) def __repr__( self : Optional[Any]): """simple docstring""" return f"""Queue({tuple(self._stacka[::-1] + self._stacka)})""" def _lowerCAmelCase ( self : Dict , _A : _T): """simple docstring""" self._stacka.append(_A) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self._stacka.pop _SCREAMING_SNAKE_CASE : List[str] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop()) if not self._stacka: raise IndexError("""Queue is empty""") return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
635
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
1
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline lowerCAmelCase_ = '''path-to-your-trained-model''' lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''') lowerCAmelCase_ = '''A photo of sks dog in a bucket''' lowerCAmelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save('''dog-bucket.png''')
635
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
1
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup lowerCAmelCase_ = { '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36''' ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''' } def lowerCamelCase_(__SCREAMING_SNAKE_CASE = "dhaka" , __SCREAMING_SNAKE_CASE = 5 )-> int: _SCREAMING_SNAKE_CASE : Optional[int] = min(__SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse! _SCREAMING_SNAKE_CASE : Union[str, Any] = { """q""": query, """tbm""": """isch""", """hl""": """en""", """ijn""": """0""", } _SCREAMING_SNAKE_CASE : List[Any] = requests.get("""https://www.google.com/search""" , params=__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = BeautifulSoup(html.text , """html.parser""" ) _SCREAMING_SNAKE_CASE : int = """""".join( re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) ) _SCREAMING_SNAKE_CASE : List[Any] = json.dumps(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = json.loads(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = re.findall( R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , __SCREAMING_SNAKE_CASE , ) if not matched_google_image_data: return 0 _SCREAMING_SNAKE_CASE : Optional[Any] = re.sub( R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(__SCREAMING_SNAKE_CASE ) , ) _SCREAMING_SNAKE_CASE : List[str] = re.findall( R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , __SCREAMING_SNAKE_CASE , ) for index, fixed_full_res_image in enumerate(__SCREAMING_SNAKE_CASE ): if index >= max_images: return index _SCREAMING_SNAKE_CASE : List[str] = bytes(__SCREAMING_SNAKE_CASE , """ascii""" ).decode( """unicode-escape""" ) _SCREAMING_SNAKE_CASE : Optional[int] = bytes(__SCREAMING_SNAKE_CASE , """ascii""" ).decode( """unicode-escape""" ) _SCREAMING_SNAKE_CASE : Tuple = urllib.request.build_opener() _SCREAMING_SNAKE_CASE : List[str] = [ ( """User-Agent""", """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36""" """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""", ) ] urllib.request.install_opener(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[Any] = F"""query_{query.replace(" " , "_" )}""" if not os.path.exists(__SCREAMING_SNAKE_CASE ): os.makedirs(__SCREAMING_SNAKE_CASE ) urllib.request.urlretrieve( # noqa: S310 __SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: lowerCAmelCase_ = download_images_from_google_query(sys.argv[1]) print(F"{image_count} images were downloaded to disk.") except IndexError: print('''Please provide a search term.''') raise
635
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
1
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
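# Example invocation (added for illustration; the script and checkpoint file
# names are assumptions):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./HTSAT-tiny-roberta.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion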
635
1
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Tuple = len(__SCREAMING_SNAKE_CASE ) for i in range(length - 1 ): _SCREAMING_SNAKE_CASE : str = i for k in range(i + 1 , __SCREAMING_SNAKE_CASE ): if collection[k] < collection[least]: _SCREAMING_SNAKE_CASE : Tuple = k if least != i: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = (collection[i], collection[least]) return collection if __name__ == "__main__": lowerCAmelCase_ = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase_ = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
635
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
1
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCAmelCase_ = imread(R'''digital_image_processing/image_data/lena_small.jpg''') lowerCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCamelCase_()-> str: _SCREAMING_SNAKE_CASE : int = cn.convert_to_negative(__SCREAMING_SNAKE_CASE ) # assert negative_img array for at least one True assert negative_img.any() def lowerCamelCase_()-> Dict: with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(__SCREAMING_SNAKE_CASE , 110 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : List[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCamelCase_()-> int: _SCREAMING_SNAKE_CASE : str = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() _SCREAMING_SNAKE_CASE : Dict = canny.canny(__SCREAMING_SNAKE_CASE ) # assert canny array for at least one True assert canny_array.any() def lowerCamelCase_()-> List[str]: assert gg.gaussian_filter(__SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all() def lowerCamelCase_()-> List[str]: # laplace diagonals _SCREAMING_SNAKE_CASE : Dict = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) _SCREAMING_SNAKE_CASE : int = conv.img_convolve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE ) assert res.any() def lowerCamelCase_()-> str: assert med.median_filter(__SCREAMING_SNAKE_CASE , 3 ).any() def lowerCamelCase_()-> int: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = sob.sobel_filter(__SCREAMING_SNAKE_CASE ) assert grad.any() and theta.any() def lowerCamelCase_()-> str: _SCREAMING_SNAKE_CASE : Optional[int] = sp.make_sepia(__SCREAMING_SNAKE_CASE , 20 ) assert sepia.all() def lowerCamelCase_(__SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" )-> str: _SCREAMING_SNAKE_CASE : Any = bs.Burkes(imread(__SCREAMING_SNAKE_CASE , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowerCamelCase_(__SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" , )-> str: _SCREAMING_SNAKE_CASE : Union[str, Any] = rs.NearestNeighbour(imread(__SCREAMING_SNAKE_CASE , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Union[str, Any] = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
_SCREAMING_SNAKE_CASE : Tuple = imread(__SCREAMING_SNAKE_CASE , 0 ) # Test for get_neighbors_pixel function() return not None _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Tuple = 0 _SCREAMING_SNAKE_CASE : Optional[int] = image[x_coordinate][y_coordinate] _SCREAMING_SNAKE_CASE : int = lbp.get_neighbors_pixel( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image _SCREAMING_SNAKE_CASE : Any = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): _SCREAMING_SNAKE_CASE : Optional[Any] = lbp.local_binary_value(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert lbp_image.any()
635
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
1
"""simple docstring""" # This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : str = multiprocessing.Manager() _SCREAMING_SNAKE_CASE : Optional[int] = manager.list() _SCREAMING_SNAKE_CASE : Optional[Any] = multiprocessing.Process(target=__SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _SCREAMING_SNAKE_CASE : Optional[Any] = shutil.rmtree _SCREAMING_SNAKE_CASE : List[str] = os.rmdir _SCREAMING_SNAKE_CASE : List[str] = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _SCREAMING_SNAKE_CASE : Any = {} with swallow_io(): with time_limit(__SCREAMING_SNAKE_CASE ): exec(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F"""failed: {e}""" ) # Needed for cleaning up. _SCREAMING_SNAKE_CASE : List[str] = rmtree _SCREAMING_SNAKE_CASE : Tuple = rmdir _SCREAMING_SNAKE_CASE : Any = chdir @contextlib.contextmanager def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: def signal_handler(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __SCREAMING_SNAKE_CASE ) signal.signal(signal.SIGALRM , __SCREAMING_SNAKE_CASE ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = WriteOnlyStringIO() with contextlib.redirect_stdout(__SCREAMING_SNAKE_CASE ): with contextlib.redirect_stderr(__SCREAMING_SNAKE_CASE ): with redirect_stdin(__SCREAMING_SNAKE_CASE ): yield @contextlib.contextmanager def lowerCamelCase_()-> List[Any]: with tempfile.TemporaryDirectory() as dirname: with chdir(__SCREAMING_SNAKE_CASE ): yield dirname class _snake_case ( __snake_case ): """simple docstring""" pass class _snake_case ( io.StringIO ): """simple docstring""" def _lowerCAmelCase ( self : Tuple , *_A : Optional[int] , **_A : Union[str, Any]): """simple docstring""" raise OSError def _lowerCAmelCase ( self : int , *_A : Optional[int] , **_A : Any): """simple docstring""" raise OSError def _lowerCAmelCase ( self : str , *_A : str , **_A : List[str]): """simple docstring""" raise OSError def _lowerCAmelCase ( self : Optional[Any] , *_A : Tuple , **_A : List[str]): """simple docstring""" return False class _snake_case ( contextlib._RedirectStream ): # type: ignore """simple docstring""" a = "stdin" @contextlib.contextmanager def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if root == ".": yield return _SCREAMING_SNAKE_CASE : str = os.getcwd() os.chdir(__SCREAMING_SNAKE_CASE ) try: yield except BaseException as exc: raise exc finally: 
os.chdir(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : Optional[Any] = None import os _SCREAMING_SNAKE_CASE : str = """1""" _SCREAMING_SNAKE_CASE : List[str] = None _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : Optional[Any] = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Optional[Any] = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : int = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : List[str] = None _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : int = None _SCREAMING_SNAKE_CASE : int = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : List[str] = None import shutil _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : Union[str, Any] = None import subprocess _SCREAMING_SNAKE_CASE : Tuple = None # type: ignore _SCREAMING_SNAKE_CASE : Tuple = None import sys _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Tuple = None
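# Usage sketch (added for illustration): in the human-eval code this file is
# adapted from, the first function above is the entry point
# check_correctness(check_program, timeout, task_id, completion_id); here its
# name is obfuscated to ``lowerCamelCase_``. Assuming the original API, a call
# would look roughly like:
#
#   program = prompt + completion + "\n" + test_code + "\ncheck(candidate)\n"
#   result = check_correctness(program, 3.0, "HumanEval/0", 0)
#   result["passed"]  # True iff the sandboxed exec() finished without error or timeout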
635
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
635
1
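The fine-tuning loop in the entry above reports `compute_perplexity` every `eval_interval` steps; that helper is defined elsewhere in the IGF example, so the signature below is a hypothetical reconstruction. A minimal sketch, assuming a causal LM that returns the mean next-token cross-entropy when `labels` are passed:

import torch

def compute_perplexity_sketch(model, batches, device="cuda"):
    # Perplexity = exp(mean token-level cross-entropy over the eval batches).
    model.eval()
    total_loss, total_batches = 0.0, 0
    with torch.no_grad():
        for input_ids in batches:  # each batch: LongTensor of token ids
            input_ids = input_ids.to(device)
            # For causal LMs, labels=input_ids yields the shifted
            # next-token cross-entropy loss.
            loss = model(input_ids, labels=input_ids).loss
            total_loss += loss.item()
            total_batches += 1
    return torch.exp(torch.tensor(total_loss / total_batches)).item()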
"""simple docstring""" import os import jsonlines import numpy as np from tqdm import tqdm lowerCAmelCase_ = 2048 lowerCAmelCase_ = 4096 lowerCAmelCase_ = 42 lowerCAmelCase_ = os.environ.pop('''PROCESS_TRAIN''', '''false''') lowerCAmelCase_ = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: def choose_first(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ): assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) == 1: _SCREAMING_SNAKE_CASE : Any = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: _SCREAMING_SNAKE_CASE : Optional[Any] = {k: [a[k]] for k in a} if len(a["""start_token"""] ) > 0: break return a _SCREAMING_SNAKE_CASE : Tuple = {"""id""": example["""id"""]} _SCREAMING_SNAKE_CASE : List[str] = example["""annotations"""] _SCREAMING_SNAKE_CASE : List[str] = annotation["""yes_no_answer"""] if 0 in yes_no_answer or 1 in yes_no_answer: _SCREAMING_SNAKE_CASE : List[Any] = ["""yes"""] if 1 in yes_no_answer else ["""no"""] _SCREAMING_SNAKE_CASE : int = [] _SCREAMING_SNAKE_CASE : Any = [] _SCREAMING_SNAKE_CASE : str = ["""<cls>"""] else: _SCREAMING_SNAKE_CASE : int = ["""short"""] _SCREAMING_SNAKE_CASE : Any = choose_first(annotation["""short_answers"""] ) if len(out["""start_token"""] ) == 0: # answer will be long if short is not available _SCREAMING_SNAKE_CASE : Any = ["""long"""] _SCREAMING_SNAKE_CASE : Optional[Any] = choose_first(annotation["""long_answer"""] , is_long_answer=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = [] answer.update(__SCREAMING_SNAKE_CASE ) # disregard some samples if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]: _SCREAMING_SNAKE_CASE : List[Any] = True else: _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Dict = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""] if not all(isinstance(answer[k] , __SCREAMING_SNAKE_CASE ) for k in cols ): raise ValueError("""Issue in ID""" , example["""id"""] ) return answer def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = _get_single_answer(__SCREAMING_SNAKE_CASE ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element _SCREAMING_SNAKE_CASE : Optional[Any] = example["""document"""]["""tokens"""] _SCREAMING_SNAKE_CASE : Any = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) return { "context": " ".join(__SCREAMING_SNAKE_CASE ), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples _SCREAMING_SNAKE_CASE : Any = ["""start_token""", """end_token"""] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 _SCREAMING_SNAKE_CASE : List[Any] = example["""document"""]["""tokens"""] _SCREAMING_SNAKE_CASE : List[str] = answer["""start_token"""] _SCREAMING_SNAKE_CASE : int = answer["""end_token"""] _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 _SCREAMING_SNAKE_CASE : Optional[Any] = """ """.join(context[start_token:end_token] ) # checking above code if assertion: _SCREAMING_SNAKE_CASE : Union[str, Any] = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]] _SCREAMING_SNAKE_CASE : int = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]] _SCREAMING_SNAKE_CASE : Tuple = """ """.join([old[i] for i in range(len(__SCREAMING_SNAKE_CASE ) ) if not is_html[i]] ) if new != old: print("""ID:""" , example["""id"""] ) print("""New:""" , __SCREAMING_SNAKE_CASE , end="""\n""" ) print("""Old:""" , __SCREAMING_SNAKE_CASE , end="""\n\n""" ) return { "context": " ".join(__SCREAMING_SNAKE_CASE ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE=True )-> Union[str, Any]: # overlap will be of doc_stride - q_len _SCREAMING_SNAKE_CASE : List[str] = get_context_and_ans(__SCREAMING_SNAKE_CASE , assertion=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = out["""answer"""] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids _SCREAMING_SNAKE_CASE : List[Any] = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : Union[str, Any] = [] _SCREAMING_SNAKE_CASE : List[Any] = input_ids[:q_len] _SCREAMING_SNAKE_CASE : List[str] = range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) , max_length - doc_stride ) for i in doc_start_indices: _SCREAMING_SNAKE_CASE : int = i + max_length - q_len _SCREAMING_SNAKE_CASE : str = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer["""category"""][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(__SCREAMING_SNAKE_CASE ), "end_token": [-100] * len(__SCREAMING_SNAKE_CASE ), "category": category, }, } _SCREAMING_SNAKE_CASE : Optional[int] = out["""context"""].split() _SCREAMING_SNAKE_CASE : str = splitted_context[answer["""end_token"""]] _SCREAMING_SNAKE_CASE : Optional[int] = len( tokenizer( """ """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=__SCREAMING_SNAKE_CASE , ).input_ids ) _SCREAMING_SNAKE_CASE : Optional[Any] = len( tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token _SCREAMING_SNAKE_CASE : int = 
len(tokenizer(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 _SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive _SCREAMING_SNAKE_CASE : List[str] = answer["""start_token"""] _SCREAMING_SNAKE_CASE : Any = answer["""end_token"""] if assertion: _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(__SCREAMING_SNAKE_CASE ) if answer["span"] != new: print("""ISSUE IN TOKENIZATION""" ) print("""OLD:""" , answer["""span"""] ) print("""NEW:""" , __SCREAMING_SNAKE_CASE , end="""\n\n""" ) if len(__SCREAMING_SNAKE_CASE ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } _SCREAMING_SNAKE_CASE : List[str] = input_ids[:q_len] _SCREAMING_SNAKE_CASE : str = range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) , max_length - doc_stride ) _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : int = [] _SCREAMING_SNAKE_CASE : List[Any] = [] _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # null, yes, no, long, short for i in doc_start_indices: _SCREAMING_SNAKE_CASE : str = i + max_length - q_len _SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: _SCREAMING_SNAKE_CASE : Any = start_token - i + q_len _SCREAMING_SNAKE_CASE : Optional[Any] = end_token - i + q_len answers_category.append(answer["""category"""][0] ) # ["short"] -> "short" else: _SCREAMING_SNAKE_CASE : Optional[Any] = -100 _SCREAMING_SNAKE_CASE : Optional[int] = -100 answers_category.append("""null""" ) _SCREAMING_SNAKE_CASE : Any = inputs[-1][start_token : end_token + 1] answers_start_token.append(__SCREAMING_SNAKE_CASE ) answers_end_token.append(__SCREAMING_SNAKE_CASE ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print("""ISSUE in strided for ID:""" , example["""id"""] ) print("""New:""" , tokenizer.decode(__SCREAMING_SNAKE_CASE ) ) print("""Old:""" , tokenizer.decode(__SCREAMING_SNAKE_CASE ) , end="""\n\n""" ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE=False )-> Any: _SCREAMING_SNAKE_CASE : List[str] = get_strided_contexts_and_ans( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , doc_stride=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , assertion=__SCREAMING_SNAKE_CASE , ) return example def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: with jsonlines.open(__SCREAMING_SNAKE_CASE , """a""" ) as writer: for example in tqdm(__SCREAMING_SNAKE_CASE , total=len(__SCREAMING_SNAKE_CASE ) , desc="""Saving samples ... 
""" ): _SCREAMING_SNAKE_CASE : Union[str, Any] = example["""labels"""] for ids, start, end, cat in zip( example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { """input_ids""": ids, """start_token""": start, """end_token""": end, """category""": CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer lowerCAmelCase_ = load_dataset('''natural_questions''') lowerCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''') lowerCAmelCase_ = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation'''] lowerCAmelCase_ = { '''tokenizer''': tokenizer, '''doc_stride''': DOC_STRIDE, '''max_length''': MAX_LENGTH, '''assertion''': False, } lowerCAmelCase_ = data.map(prepare_inputs, fn_kwargs=fn_kwargs) lowerCAmelCase_ = data.remove_columns(['''annotations''', '''document''', '''id''', '''question''']) print(data) np.random.seed(SEED) lowerCAmelCase_ = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl''' save_to_disk(data, file_name=cache_file_name)
635
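The preprocessing entry above slides windows with `range(q_len, len(input_ids), max_length - doc_stride)`, and its in-code comment notes the resulting overlap is `doc_stride - q_len`. A small standalone sketch of that arithmetic, with illustrative numbers:

# Window starts advance by (max_length - doc_stride); each window holds
# (max_length - q_len) context tokens, so neighbouring windows share
# (max_length - q_len) - (max_length - doc_stride) = doc_stride - q_len tokens.
q_len, max_length, doc_stride = 10, 50, 20
input_ids = list(range(200))  # stand-in for tokenized question + context

windows = []
for i in range(q_len, len(input_ids), max_length - doc_stride):
    windows.append(input_ids[i : i + max_length - q_len])

overlap = len(set(windows[0]) & set(windows[1]))
assert overlap == doc_stride - q_len  # 20 - 10 = 10 shared tokens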
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
1
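A short usage sketch for the processor class above. The checkpoint name and the local image file are illustrative assumptions, not taken from the entry itself:

from PIL import Image
from transformers import ChineseCLIPProcessor

# Checkpoint name is an assumption for illustration.
processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

image = Image.open("cat.jpg")  # hypothetical local file
inputs = processor(text=["一只猫"], images=image, return_tensors="pt", padding=True)
# Text-only and image-only calls are also allowed; passing neither raises ValueError.
print(inputs.keys())  # typically input_ids, token_type_ids, attention_mask, pixel_values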
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 100 )-> int: _SCREAMING_SNAKE_CASE : Any = set() _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Tuple = n + 1 # maximum limit for a in range(2 , __SCREAMING_SNAKE_CASE ): for b in range(2 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = a**b # calculates the current power collect_powers.add(__SCREAMING_SNAKE_CASE ) # adds the result to the set return len(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print('''Number of terms ''', solution(int(str(input()).strip())))
635
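Because the placeholder names in the entry above collapse several distinct identifiers, the function is not runnable as printed. A de-obfuscated sketch of the same computation (the Project Euler 29 "distinct powers" count), with a worked check for a small limit:

def distinct_powers(n: int = 100) -> int:
    # Count distinct values of a**b for 2 <= a <= n and 2 <= b <= n.
    return len({a**b for a in range(2, n + 1) for b in range(2, n + 1)})

# For n = 5 the candidates are 4, 8, 16, 32, 9, 27, 81, 243, 16, 64, 256,
# 1024, 25, 125, 625, 3125; 16 appears twice (2**4 == 4**2), so 15 remain.
assert distinct_powers(5) == 15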
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
635
1
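The `rename_state_dict` step in the conversion script above splits Audiocraft's fused `in_proj_weight` into separate query/key/value projections. A toy sketch of that split, with the shapes checked on random data:

import torch

hidden_size = 8
# The fused projection stacks W_q, W_k, W_v along dim 0: shape (3*h, h).
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]

assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)
assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), in_proj_weight)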
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class _snake_case ( __snake_case ): """simple docstring""" a = "bridgetower_vision_model" def __init__( self : Union[str, Any] , _A : Union[str, Any]=7_6_8 , _A : Tuple=1_2 , _A : List[Any]=3 , _A : Dict=1_6 , _A : str=2_8_8 , _A : Optional[Any]=1 , _A : Union[str, Any]=1e-05 , _A : Any=False , _A : int=True , _A : Optional[int]=False , **_A : int , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : int = hidden_size _SCREAMING_SNAKE_CASE : int = num_hidden_layers _SCREAMING_SNAKE_CASE : Optional[int] = num_channels _SCREAMING_SNAKE_CASE : str = patch_size _SCREAMING_SNAKE_CASE : int = image_size _SCREAMING_SNAKE_CASE : Tuple = initializer_factor _SCREAMING_SNAKE_CASE : str = layer_norm_eps _SCREAMING_SNAKE_CASE : Optional[Any] = stop_gradient _SCREAMING_SNAKE_CASE : Any = share_layernorm _SCREAMING_SNAKE_CASE : int = remove_last_layer @classmethod def _lowerCAmelCase ( cls : Optional[int] , _A : Union[str, os.PathLike] , **_A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(_A , **_A) if config_dict.get("""model_type""") == "bridgetower": _SCREAMING_SNAKE_CASE : List[str] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(_A , **_A) class _snake_case ( __snake_case ): """simple docstring""" a = "bridgetower_text_model" def __init__( self : Any , _A : int=5_0_2_6_5 , _A : Union[str, Any]=7_6_8 , _A : int=1_2 , _A : Tuple=1_2 , _A : Any=1 , _A : List[Any]=3_0_7_2 , _A : str="gelu" , _A : List[Any]=0.1 , _A : Union[str, Any]=0.1 , _A : List[str]=5_1_4 , _A : Union[str, Any]=1 , _A : str=1e-05 , _A : Dict=1 , _A : Union[str, Any]=0 , _A : Any=2 , _A : Dict="absolute" , _A : Dict=True , **_A : Any , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : int = vocab_size _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size _SCREAMING_SNAKE_CASE : Any = num_hidden_layers _SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads _SCREAMING_SNAKE_CASE : int = hidden_act _SCREAMING_SNAKE_CASE : Optional[Any] = initializer_factor _SCREAMING_SNAKE_CASE : str = intermediate_size _SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size _SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps _SCREAMING_SNAKE_CASE : Any = position_embedding_type _SCREAMING_SNAKE_CASE : List[Any] = use_cache _SCREAMING_SNAKE_CASE : List[Any] = pad_token_id _SCREAMING_SNAKE_CASE : List[str] = bos_token_id _SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id @classmethod def _lowerCAmelCase ( cls : int , _A : Union[str, os.PathLike] , **_A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(_A , **_A) if config_dict.get("""model_type""") == "bridgetower": _SCREAMING_SNAKE_CASE : Any = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(_A , **_A) class _snake_case ( __snake_case ): """simple docstring""" a = "bridgetower" def __init__( self : List[str] , _A : str=True , _A : Tuple="gelu" , _A : Optional[Any]=7_6_8 , _A : Dict=1 , _A : Tuple=1e-05 , _A : Dict=False , _A : Tuple="add" , _A : Tuple=1_2 , _A : Any=6 , _A : Union[str, Any]=False , _A : Dict=False , _A : str=None , _A : Optional[Any]=None , **_A : Optional[int] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = kwargs.pop("""text_config_dict""" , _A) _SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""vision_config_dict""" , _A) super().__init__(**_A) _SCREAMING_SNAKE_CASE : str = share_cross_modal_transformer_layers _SCREAMING_SNAKE_CASE : int = hidden_act _SCREAMING_SNAKE_CASE : Optional[int] = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor _SCREAMING_SNAKE_CASE : str = layer_norm_eps _SCREAMING_SNAKE_CASE : Dict = share_link_tower_layers _SCREAMING_SNAKE_CASE : Union[str, Any] = link_tower_type _SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings _SCREAMING_SNAKE_CASE : Any = init_layernorm_from_vision_encoder if text_config is None: _SCREAMING_SNAKE_CASE : Optional[Any] = {} logger.info("""`text_config` is `None`. 
Initializing the `BridgeTowerTextConfig` with default values.""") if vision_config is None: _SCREAMING_SNAKE_CASE : Tuple = {} logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""") _SCREAMING_SNAKE_CASE : Optional[Any] = BridgeTowerTextConfig(**_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = BridgeTowerVisionConfig(**_A) @classmethod def _lowerCAmelCase ( cls : str , _A : BridgeTowerTextConfig , _A : BridgeTowerVisionConfig , **_A : Union[str, Any]): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__) _SCREAMING_SNAKE_CASE : Tuple = self.text_config.to_dict() _SCREAMING_SNAKE_CASE : Dict = self.vision_config.to_dict() _SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type return output
635
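A usage sketch for the composite config in the entry above, building the top-level config from explicit sub-configs via the classmethod the file defines. It assumes all three config classes are exported at the top level of transformers, as they are for similar dual-encoder models:

from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_config = BridgeTowerTextConfig(hidden_size=768, num_hidden_layers=12)
vision_config = BridgeTowerVisionConfig(hidden_size=768, num_hidden_layers=12)

config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
# to_dict() round-trips the nested configs alongside the top-level fields.
assert config.to_dict()["text_config"]["hidden_size"] == 768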
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
635
1
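The final property in the SEW config above multiplies the convolutional strides together, giving the feature extractor's overall downsampling factor (in transformers this property is conventionally named `inputs_to_logits_ratio`). With the default strides shown in the entry, the product works out as follows:

import functools
import operator

# Default conv_stride from the config above.
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)

ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # 5 * 2**6: one frame of features per 320 input samples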
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class _snake_case : """simple docstring""" def __init__( self : Tuple , _A : List[str] , _A : List[str]=1_3 , _A : int=7 , _A : int=True , _A : List[str]=True , _A : Optional[int]=False , _A : Union[str, Any]=True , _A : int=9_9 , _A : List[str]=6_4 , _A : Tuple=5 , _A : List[str]=4 , _A : str=6_4 , _A : List[str]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Optional[int]=5_1_2 , _A : int=1_6 , _A : Union[str, Any]=2 , _A : int=0.02 , _A : str=3 , _A : Union[str, Any]=4 , _A : Tuple=None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = parent _SCREAMING_SNAKE_CASE : List[str] = batch_size _SCREAMING_SNAKE_CASE : Dict = seq_length _SCREAMING_SNAKE_CASE : int = is_training _SCREAMING_SNAKE_CASE : int = use_input_mask _SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids _SCREAMING_SNAKE_CASE : str = use_labels _SCREAMING_SNAKE_CASE : Optional[int] = vocab_size _SCREAMING_SNAKE_CASE : Tuple = hidden_size _SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers _SCREAMING_SNAKE_CASE : Tuple = num_attention_heads _SCREAMING_SNAKE_CASE : List[Any] = intermediate_size _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Dict = max_position_embeddings _SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size _SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size _SCREAMING_SNAKE_CASE : List[Any] = initializer_range _SCREAMING_SNAKE_CASE : int = num_labels _SCREAMING_SNAKE_CASE : Optional[int] = num_choices _SCREAMING_SNAKE_CASE : Dict = scope def _lowerCAmelCase ( self : Dict): """simple docstring""" return MPNetConfig.from_pretrained("""microsoft/mpnet-base""") def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _SCREAMING_SNAKE_CASE : Dict = None if self.use_input_mask: _SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length]) _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: _SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_choices) _SCREAMING_SNAKE_CASE : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self : List[str]): """simple docstring""" return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob 
, max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self : List[str] , _A : List[str] , _A : Any , _A : Optional[Any] , _A : Optional[Any] , _A : List[Any] , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = MPNetModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A , _A) _SCREAMING_SNAKE_CASE : Dict = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _lowerCAmelCase ( self : Any , _A : Tuple , _A : Tuple , _A : Tuple , _A : Tuple , _A : Any , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = MPNetForQuestionAnswering(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : str , _A : Any , _A : List[str] , _A : str , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels _SCREAMING_SNAKE_CASE : Any = MPNetForSequenceClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCAmelCase ( self : Union[str, Any] , _A : List[str] , _A : List[str] , _A : List[str] , _A : List[str] , _A : Any , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices _SCREAMING_SNAKE_CASE : Union[str, Any] = MPNetForMultipleChoice(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _SCREAMING_SNAKE_CASE : int = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _SCREAMING_SNAKE_CASE : Dict = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _lowerCAmelCase ( self : Optional[int] , _A : List[str] , _A : int , _A : Optional[int] , _A : Tuple , _A : Any , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels _SCREAMING_SNAKE_CASE : int = MPNetForTokenClassification(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() ((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs _SCREAMING_SNAKE_CASE : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) a = ( { 
"feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) a = False a = True def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = MPNetModelTester(self) _SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=_A , hidden_size=3_7) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*_A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*_A) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*_A) @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = MPNetModel.from_pretrained("""microsoft/mpnet-base""") _SCREAMING_SNAKE_CASE : Any = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]]) _SCREAMING_SNAKE_CASE : Tuple = model(_A)[0] _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_1, 7_6_8)) self.assertEqual(output.shape , _A) _SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]]) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4))
635
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
1
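The `_LazyModule` indirection in the `__init__.py` above defers the heavy torch-dependent imports until a symbol is actually used. A simplified standalone sketch of the idea, not the actual transformers implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    # Minimal sketch: a submodule is imported only the first time
    # one of its exported symbols is accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value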
"""simple docstring""" from collections.abc import Callable import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> np.ndarray: _SCREAMING_SNAKE_CASE : Tuple = int(np.ceil((x_end - xa) / step_size ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros((n + 1,) ) _SCREAMING_SNAKE_CASE : str = ya _SCREAMING_SNAKE_CASE : Optional[int] = xa for k in range(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = y[k] + step_size * ode_func(__SCREAMING_SNAKE_CASE , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
635
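A quick usage check for the explicit Euler integrator above, de-obfuscated under the stand-in name `explicit_euler`. It solves y' = y with y(0) = 1, whose exact solution is e^x; the small step size keeps the endpoint error modest:

import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    # De-obfuscated stand-in for the integrator defined in the entry above.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
assert abs(y[-1] - np.e) < 5e-3  # first-order method: error ~ O(step_size)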
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
635
1
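A minimal round-trip sketch of the reader/writer pair exercised by the tests above, using an in-memory dataset; `cache_dir` is omitted and assumed optional here:

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})

ParquetDatasetWriter(dataset, "example.parquet").write()  # returns bytes written (> 0)
reloaded = ParquetDatasetReader("example.parquet").read()

assert reloaded.column_names == ["col_1", "col_2"]
assert reloaded.num_rows == 2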
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '''▁''' lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/reformer-crime-and-punishment''': ( '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model''' ) } } lowerCAmelCase_ = { '''google/reformer-crime-and-punishment''': 524288, } class _snake_case ( __snake_case ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , _A : int , _A : Any="</s>" , _A : Any="<unk>" , _A : Union[str, Any]=[] , _A : Optional[Dict[str, Any]] = None , **_A : List[Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_A , unk_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) _SCREAMING_SNAKE_CASE : Optional[Any] = vocab_file _SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_A) @property def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" return self.sp_model.get_piece_size() def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() _SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Dict , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _SCREAMING_SNAKE_CASE : int = {} _SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _lowerCAmelCase ( self : Optional[int] , _A : str): """simple docstring""" return self.sp_model.encode(_A , out_type=_A) def _lowerCAmelCase ( self : Optional[Any] , _A : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(_A) def _lowerCAmelCase ( self : Any , _A : Union[str, Any]): """simple docstring""" if index < self.sp_model.get_piece_size(): _SCREAMING_SNAKE_CASE : str = self.sp_model.IdToPiece(_A) return token def _lowerCAmelCase ( self : Tuple , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = [] _SCREAMING_SNAKE_CASE : Any = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A) + token _SCREAMING_SNAKE_CASE : Optional[int] = [] else: current_sub_tokens.append(_A) out_string += self.sp_model.decode(_A) return out_string.strip() def _lowerCAmelCase ( self : Tuple , _A : str , _A : Optional[str] = None): """simple docstring""" if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return _SCREAMING_SNAKE_CASE : Any = os.path.join( _A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and 
os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _A) elif not os.path.isfile(self.vocab_file): with open(_A , """wb""") as fi: _SCREAMING_SNAKE_CASE : List[str] = self.sp_model.serialized_model_proto() fi.write(_A) return (out_vocab_file,)
635
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
635
1
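# A minimal cross-check for the digit-removal routine above (the function name
# here is illustrative, not part of the original file): the same answer falls
# out of a one-line slicing formulation.
def max_after_removing_one_digit(number: int) -> int:
    s = str(abs(number))
    return max(int(s[:i] + s[i + 1 :]) for i in range(len(s)))


assert max_after_removing_one_digit(152) == 52
assert max_after_removing_one_digit(649) == 69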
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: _SCREAMING_SNAKE_CASE : List[str] = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 5_000 )-> int: _SCREAMING_SNAKE_CASE : Any = [(i * (3 * i - 1)) // 2 for i in range(1 , __SCREAMING_SNAKE_CASE )] for i, pentagonal_i in enumerate(__SCREAMING_SNAKE_CASE ): for j in range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ): _SCREAMING_SNAKE_CASE : Dict = pentagonal_nums[j] _SCREAMING_SNAKE_CASE : int = pentagonal_i + pentagonal_j _SCREAMING_SNAKE_CASE : Optional[Any] = pentagonal_j - pentagonal_i if is_pentagonal(__SCREAMING_SNAKE_CASE ) and is_pentagonal(__SCREAMING_SNAKE_CASE ): return b return -1 if __name__ == "__main__": print(F"{solution() = }")
635
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
635
1
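# Hedged usage sketch for the streaming API exercised by the tests above: a
# TextIteratorStreamer yields decoded text chunks while generate() runs in a
# background thread. The tiny test checkpoint is fetched from the Hub, so this
# needs network access on first run.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
Thread(target=model.generate, kwargs=generation_kwargs).start()
streamed_text = "".join(chunk for chunk in streamer)  # consume chunks as they arrive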
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''', } class _snake_case ( __snake_case ): """simple docstring""" a = "lxmert" a = {} def __init__( self : Tuple , _A : str=3_0_5_2_2 , _A : Optional[int]=7_6_8 , _A : Dict=1_2 , _A : Union[str, Any]=9_5_0_0 , _A : Union[str, Any]=1_6_0_0 , _A : Optional[Any]=4_0_0 , _A : str=3_0_7_2 , _A : Dict="gelu" , _A : List[Any]=0.1 , _A : Tuple=0.1 , _A : Optional[int]=5_1_2 , _A : str=2 , _A : Dict=0.02 , _A : int=1e-12 , _A : List[str]=9 , _A : List[str]=5 , _A : Optional[Any]=5 , _A : List[Any]=2_0_4_8 , _A : Optional[Any]=4 , _A : Dict=6.67 , _A : Any=True , _A : List[Any]=True , _A : Union[str, Any]=True , _A : Dict=True , _A : Optional[int]=True , _A : List[Any]=True , _A : List[Any]=True , **_A : int , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = vocab_size _SCREAMING_SNAKE_CASE : int = hidden_size _SCREAMING_SNAKE_CASE : Any = num_attention_heads _SCREAMING_SNAKE_CASE : List[str] = hidden_act _SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : int = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size _SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range _SCREAMING_SNAKE_CASE : Dict = layer_norm_eps _SCREAMING_SNAKE_CASE : List[str] = num_qa_labels _SCREAMING_SNAKE_CASE : int = num_object_labels _SCREAMING_SNAKE_CASE : Dict = num_attr_labels _SCREAMING_SNAKE_CASE : Optional[Any] = l_layers _SCREAMING_SNAKE_CASE : str = x_layers _SCREAMING_SNAKE_CASE : int = r_layers _SCREAMING_SNAKE_CASE : Optional[Any] = visual_feat_dim _SCREAMING_SNAKE_CASE : Dict = visual_pos_dim _SCREAMING_SNAKE_CASE : Union[str, Any] = visual_loss_normalizer _SCREAMING_SNAKE_CASE : List[str] = task_matched _SCREAMING_SNAKE_CASE : List[Any] = task_mask_lm _SCREAMING_SNAKE_CASE : Tuple = task_obj_predict _SCREAMING_SNAKE_CASE : str = task_qa _SCREAMING_SNAKE_CASE : Union[str, Any] = visual_obj_loss _SCREAMING_SNAKE_CASE : List[Any] = visual_attr_loss _SCREAMING_SNAKE_CASE : Optional[int] = visual_feat_loss _SCREAMING_SNAKE_CASE : Optional[int] = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers} super().__init__(**_A)
635
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
1
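# The text-classification tool above wraps an NLI model; the same zero-shot
# behaviour is available through the high-level pipeline API, sketched here
# with the checkpoint the tool defaults to.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("This is a tutorial about machine learning", candidate_labels=["education", "politics"])
print(result["labels"][0])  # the most likely label comes first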
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
635
1
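# Minimal inference sketch matching the DiT integration test above. A blank
# PIL image stands in for a real document scan, so the predicted RVL-CDIP
# label is only illustrative; the checkpoint download requires network access.
import torch
from PIL import Image

from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
inputs = processor(Image.new("RGB", (224, 224)), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])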
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
635
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
1
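# Self-contained illustration of the fused-QKV split performed in the MusicGen
# conversion above: a single in_proj weight of shape (3 * hidden, hidden) is
# sliced into separate query / key / value projection matrices.
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), in_proj_weight)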
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = None a = BloomTokenizerFast a = BloomTokenizerFast a = True a = False a = "tokenizer_file" a = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def _lowerCAmelCase ( self : Any): """simple docstring""" super().setUp() _SCREAMING_SNAKE_CASE : str = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""") tokenizer.save_pretrained(self.tmpdirname) def _lowerCAmelCase ( self : Any , **_A : str): """simple docstring""" kwargs.update(self.special_tokens_map) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE : Union[str, Any] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""] _SCREAMING_SNAKE_CASE : Tuple = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]] _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_encode_plus(_A)["""input_ids"""] self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(_A) self.assertListEqual(_A , _A) def _lowerCAmelCase ( self : str , _A : Union[str, Any]=6): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""): _SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input _SCREAMING_SNAKE_CASE : Dict = """This is a simple input""" _SCREAMING_SNAKE_CASE : List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""] _SCREAMING_SNAKE_CASE : Dict = ("""This is a simple input""", """This is a pair""") _SCREAMING_SNAKE_CASE : List[Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests try: tokenizer_r.encode(_A , max_length=_A) tokenizer_r.encode_plus(_A , max_length=_A) tokenizer_r.batch_encode_plus(_A , max_length=_A) tokenizer_r.encode(_A , max_length=_A) tokenizer_r.batch_encode_plus(_A , max_length=_A) except ValueError: self.fail("""Bloom Tokenizer should be able to deal with padding""") _SCREAMING_SNAKE_CASE : Optional[int] = None # Hotfixing padding = None self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="""max_length""") # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="""max_length""") # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="""max_length""" , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="""max_length""") # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="""max_length""") # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="""max_length""" , ) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE : List[str] = load_dataset("""xnli""" , 
"""all_languages""" , split="""test""" , streaming=_A) _SCREAMING_SNAKE_CASE : List[str] = next(iter(_A))["""premise"""] # pick up one data _SCREAMING_SNAKE_CASE : int = list(sample_data.values()) _SCREAMING_SNAKE_CASE : List[str] = list(map(tokenizer.encode , _A)) _SCREAMING_SNAKE_CASE : List[str] = [tokenizer.decode(_A , clean_up_tokenization_spaces=_A) for x in output_tokens] self.assertListEqual(_A , _A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
635
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
1
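# Self-contained sanity check for the Chudnovsky constants used above: even the
# zeroth-order term, 426880 * sqrt(10005) / 13591409, already matches pi to
# about 14 significant digits.
from decimal import Decimal, getcontext

getcontext().prec = 10
zeroth_order = 426_880 * Decimal(10_005).sqrt() / 13_591_409
assert str(zeroth_order).startswith("3.14159265")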
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCAmelCase_ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowerCAmelCase_ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') lowerCAmelCase_ = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = None # source code of `config_class` _SCREAMING_SNAKE_CASE : str = inspect.getsource(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = _re_checkpoint.findall(__SCREAMING_SNAKE_CASE ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("""/""" ): _SCREAMING_SNAKE_CASE : Optional[Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link _SCREAMING_SNAKE_CASE : Tuple = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: _SCREAMING_SNAKE_CASE : int = ckpt_name break return checkpoint def lowerCamelCase_()-> List[str]: _SCREAMING_SNAKE_CASE : str = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue _SCREAMING_SNAKE_CASE : int = get_checkpoint_from_config_class(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[Any] = """\n""".join(sorted(__SCREAMING_SNAKE_CASE ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
635
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
1
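# Self-contained demo of the docstring-checkpoint convention enforced above:
# the regex pulls "[name](https://huggingface.co/name)" pairs out of a config
# docstring, and each link must match its checkpoint name.
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
docstring = "Instantiating a configuration with the defaults of [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
    assert ckpt_link.rstrip("/") == f"https://huggingface.co/{ckpt_name}"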
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class _snake_case ( __snake_case ): """simple docstring""" a = "camembert" def __init__( self : Union[str, Any] , _A : Tuple=3_0_5_2_2 , _A : int=7_6_8 , _A : Dict=1_2 , _A : Union[str, Any]=1_2 , _A : List[str]=3_0_7_2 , _A : Union[str, Any]="gelu" , _A : Any=0.1 , _A : Tuple=0.1 , _A : List[Any]=5_1_2 , _A : Optional[Any]=2 , _A : Tuple=0.02 , _A : Optional[Any]=1e-12 , _A : str=1 , _A : Union[str, Any]=0 , _A : Optional[Any]=2 , _A : Union[str, Any]="absolute" , _A : Union[str, Any]=True , _A : Dict=None , **_A : Tuple , ): """simple docstring""" super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A) _SCREAMING_SNAKE_CASE : List[Any] = vocab_size _SCREAMING_SNAKE_CASE : List[Any] = hidden_size _SCREAMING_SNAKE_CASE : Any = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = num_attention_heads _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act _SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size _SCREAMING_SNAKE_CASE : str = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : str = max_position_embeddings _SCREAMING_SNAKE_CASE : int = type_vocab_size _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : int = layer_norm_eps _SCREAMING_SNAKE_CASE : Tuple = position_embedding_type _SCREAMING_SNAKE_CASE : str = use_cache _SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout class _snake_case ( __snake_case ): """simple docstring""" @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" if self.task == "multiple-choice": _SCREAMING_SNAKE_CASE : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _SCREAMING_SNAKE_CASE : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
635
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
1
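# Numerical illustration of the property the Rayleigh-quotient file above
# relies on: for a Hermitian matrix the quotient is real and bounded by the
# extreme eigenvalues (the matrix here is arbitrary).
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 3.0]])  # real symmetric, hence Hermitian
v = np.array([[1.0], [1.0]])
quotient = (v.T @ a @ v) / (v.T @ v)
low, high = np.linalg.eigvalsh(a)[[0, -1]]
assert low - 1e-9 <= quotient.item() <= high + 1e-9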
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class _snake_case ( __snake_case ): """simple docstring""" a = "efficientnet" def __init__( self : int , _A : int = 3 , _A : int = 6_0_0 , _A : float = 2.0 , _A : float = 3.1 , _A : int = 8 , _A : List[int] = [3, 3, 5, 3, 5, 5, 3] , _A : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , _A : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , _A : List[int] = [] , _A : List[int] = [1, 2, 2, 2, 1, 2, 1] , _A : List[int] = [1, 2, 2, 3, 3, 4, 1] , _A : List[int] = [1, 6, 6, 6, 6, 6, 6] , _A : float = 0.25 , _A : str = "swish" , _A : int = 2_5_6_0 , _A : str = "mean" , _A : float = 0.02 , _A : float = 0.001 , _A : float = 0.99 , _A : float = 0.5 , _A : float = 0.2 , **_A : List[str] , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : List[str] = num_channels _SCREAMING_SNAKE_CASE : Union[str, Any] = image_size _SCREAMING_SNAKE_CASE : Optional[Any] = width_coefficient _SCREAMING_SNAKE_CASE : Tuple = depth_coefficient _SCREAMING_SNAKE_CASE : Dict = depth_divisor _SCREAMING_SNAKE_CASE : Optional[Any] = kernel_sizes _SCREAMING_SNAKE_CASE : Dict = in_channels _SCREAMING_SNAKE_CASE : Any = out_channels _SCREAMING_SNAKE_CASE : Dict = depthwise_padding _SCREAMING_SNAKE_CASE : str = strides _SCREAMING_SNAKE_CASE : Dict = num_block_repeats _SCREAMING_SNAKE_CASE : Tuple = expand_ratios _SCREAMING_SNAKE_CASE : int = squeeze_expansion_ratio _SCREAMING_SNAKE_CASE : int = hidden_act _SCREAMING_SNAKE_CASE : int = hidden_dim _SCREAMING_SNAKE_CASE : Union[str, Any] = pooling_type _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[str] = batch_norm_eps _SCREAMING_SNAKE_CASE : Tuple = batch_norm_momentum _SCREAMING_SNAKE_CASE : List[str] = dropout_rate _SCREAMING_SNAKE_CASE : Optional[Any] = drop_connect_rate _SCREAMING_SNAKE_CASE : int = sum(_A) * 4 class _snake_case ( __snake_case ): """simple docstring""" a = version.parse("1.11" ) @property def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ]) @property def _lowerCAmelCase ( self : str): """simple docstring""" return 1e-5
635
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
1
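# Worked example of the mass-action law n * p = n_i**2 that the function above
# rearranges; the silicon numbers are rough textbook values at 300 K, used
# only for illustration.
intrinsic_conc = 1.5e10  # cm^-3, approximate n_i for silicon
electron_conc = 1.0e16   # cm^-3, donor-dominated sample
hole_conc = intrinsic_conc**2 / electron_conc  # => 2.25e4 cm^-3
assert abs(electron_conc * hole_conc - intrinsic_conc**2) < 1e-6 * intrinsic_conc**2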
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "AutoImageProcessor" a = "AutoTokenizer" def __init__( self : int , _A : str=None , _A : Tuple=None , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : str = self.image_processor _SCREAMING_SNAKE_CASE : int = False def __call__( self : Optional[int] , *_A : Optional[int] , **_A : Optional[int]): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_A , **_A) _SCREAMING_SNAKE_CASE : Dict = kwargs.pop("""images""" , _A) _SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""text""" , _A) if len(_A) > 0: _SCREAMING_SNAKE_CASE : Dict = args[0] _SCREAMING_SNAKE_CASE : Any = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""") if images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(_A , *_A , **_A) if text is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(_A , **_A) if text is None: return inputs elif images is None: return encodings else: _SCREAMING_SNAKE_CASE : Union[str, Any] = encodings["""input_ids"""] return inputs def _lowerCAmelCase ( self : int , *_A : Tuple , **_A : Dict): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Tuple , *_A : Optional[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @contextmanager def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""") _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer yield _SCREAMING_SNAKE_CASE : Dict = self.image_processor _SCREAMING_SNAKE_CASE : Dict = False def _lowerCAmelCase ( self : List[Any] , _A : str , _A : Any=False , _A : List[str]=None): """simple docstring""" if added_vocab is None: _SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.get_added_vocab() _SCREAMING_SNAKE_CASE : Optional[int] = {} while tokens: _SCREAMING_SNAKE_CASE : Union[str, Any] = re.search(r"""<s_(.*?)>""" , _A , re.IGNORECASE) if start_token is None: break _SCREAMING_SNAKE_CASE : Union[str, Any] = start_token.group(1) _SCREAMING_SNAKE_CASE : Tuple = re.search(rf"""</s_{key}>""" , _A , re.IGNORECASE) _SCREAMING_SNAKE_CASE : int = start_token.group() if end_token is None: _SCREAMING_SNAKE_CASE : str = tokens.replace(_A , """""") else: _SCREAMING_SNAKE_CASE : Any = end_token.group() _SCREAMING_SNAKE_CASE : List[str] = re.escape(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = re.escape(_A) _SCREAMING_SNAKE_CASE : List[Any] = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , _A , re.IGNORECASE) if content is not None: _SCREAMING_SNAKE_CASE : str = content.group(1).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenajson(_A , is_inner_value=_A , added_vocab=_A) if value: if len(_A) == 1: _SCREAMING_SNAKE_CASE : Optional[Any] = value[0] _SCREAMING_SNAKE_CASE : Optional[int] = value else: # leaf nodes _SCREAMING_SNAKE_CASE : int = [] for leaf in content.split(r"""<sep/>"""): _SCREAMING_SNAKE_CASE : Optional[int] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _SCREAMING_SNAKE_CASE : List[str] = leaf[1:-2] # for categorical special tokens output[key].append(_A) if len(output[key]) == 1: _SCREAMING_SNAKE_CASE : str = output[key][0] _SCREAMING_SNAKE_CASE : Union[str, Any] = tokens[tokens.find(_A) + len(_A) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_A , added_vocab=_A) if len(_A): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class @property def _lowerCAmelCase ( self : List[Any]): """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _A , ) return self.image_processor
635
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
635
1
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> list: if n_term == "": return [] _SCREAMING_SNAKE_CASE : list = [] for temp in range(int(__SCREAMING_SNAKE_CASE ) ): series.append(F"""1/{temp + 1}""" if series else """1""" ) return series if __name__ == "__main__": lowerCAmelCase_ = input('''Enter the last number (nth term) of the Harmonic Series''') print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''') print(harmonic_series(nth_term))
635
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
1
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
1
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar('''KEY''') lowerCAmelCase_ = TypeVar('''VAL''') @dataclass(frozen=__snake_case , slots=__snake_case ) class _snake_case ( Generic[KEY, VAL] ): """simple docstring""" a = 42 a = 42 class _snake_case ( _Item ): """simple docstring""" def __init__( self : Any): """simple docstring""" super().__init__(_A , _A) def __bool__( self : Any): """simple docstring""" return False lowerCAmelCase_ = _DeletedItem() class _snake_case ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : Dict , _A : int = 8 , _A : float = 0.75): """simple docstring""" _SCREAMING_SNAKE_CASE : int = initial_block_size _SCREAMING_SNAKE_CASE : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 _SCREAMING_SNAKE_CASE : Tuple = capacity_factor _SCREAMING_SNAKE_CASE : List[str] = 0 def _lowerCAmelCase ( self : Union[str, Any] , _A : KEY): """simple docstring""" return hash(_A) % len(self._buckets) def _lowerCAmelCase ( self : List[Any] , _A : int): """simple docstring""" return (ind + 1) % len(self._buckets) def _lowerCAmelCase ( self : List[str] , _A : int , _A : KEY , _A : VAL): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self._buckets[ind] if not stored: _SCREAMING_SNAKE_CASE : str = _Item(_A , _A) self._len += 1 return True elif stored.key == key: _SCREAMING_SNAKE_CASE : Union[str, Any] = _Item(_A , _A) return True else: return False def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = len(self._buckets) * self._capacity_factor return len(self) >= int(_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" if len(self._buckets) <= self._initial_block_size: return False _SCREAMING_SNAKE_CASE : Optional[int] = len(self._buckets) * self._capacity_factor / 2 return len(self) < limit def _lowerCAmelCase ( self : Optional[Any] , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self._buckets _SCREAMING_SNAKE_CASE : List[Any] = [None] * new_size _SCREAMING_SNAKE_CASE : Tuple = 0 for item in old_buckets: if item: self._add_item(item.key , item.val) def _lowerCAmelCase ( self : int): """simple docstring""" self._resize(len(self._buckets) * 2) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" self._resize(len(self._buckets) // 2) def _lowerCAmelCase ( self : Optional[Any] , _A : KEY): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self._get_bucket_index(_A) for _ in range(len(self._buckets)): yield ind _SCREAMING_SNAKE_CASE : Any = self._get_next_ind(_A) def _lowerCAmelCase ( self : List[Any] , _A : KEY , _A : VAL): """simple docstring""" for ind in self._iterate_buckets(_A): if self._try_set(_A , _A , _A): break def __setitem__( self : Tuple , _A : KEY , _A : VAL): """simple docstring""" if self._is_full(): self._size_up() self._add_item(_A , _A) def __delitem__( self : Optional[Any] , _A : KEY): """simple docstring""" for ind in self._iterate_buckets(_A): _SCREAMING_SNAKE_CASE : str = self._buckets[ind] if item is None: raise KeyError(_A) if item is _deleted: continue if item.key == key: _SCREAMING_SNAKE_CASE : int = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Any , _A : KEY): """simple docstring""" for ind in self._iterate_buckets(_A): _SCREAMING_SNAKE_CASE : Optional[int] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key 
== key: return item.val raise KeyError(_A) def __len__( self : List[Any]): """simple docstring""" return self._len def __iter__( self : str): """simple docstring""" yield from (item.key for item in self._buckets if item) def __repr__( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """ ,""".join( f"""{item.key}: {item.val}""" for item in self._buckets if item) return f"""HashMap({val_string})"""
635
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
1
"""simple docstring""" import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> List[str]: _SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : Optional[int] = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. _SCREAMING_SNAKE_CASE : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : List[str] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : List[Any] = 8 else: _SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_SCREAMING_SNAKE_CASE : List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , drop_last=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[Any]: # Initialize accelerator _SCREAMING_SNAKE_CASE : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : str = config["""lr"""] _SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : List[str] = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : Any = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = get_dataloaders(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : List[str] = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : List[Any] = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : List[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = outputs.loss _SCREAMING_SNAKE_CASE : str = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : int = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
635
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
635
1
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : int = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) _SCREAMING_SNAKE_CASE : List[Any] = MaskFormerConfig(backbone_config=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : List[Any] = 847 _SCREAMING_SNAKE_CASE : str = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : Union[str, Any] = 150 _SCREAMING_SNAKE_CASE : Optional[int] = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : Dict = 171 _SCREAMING_SNAKE_CASE : int = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO _SCREAMING_SNAKE_CASE : str = 133 _SCREAMING_SNAKE_CASE : List[str] = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : int = 19 _SCREAMING_SNAKE_CASE : Union[str, Any] = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : Optional[int] = 65 _SCREAMING_SNAKE_CASE : Tuple = """mapillary-vistas-id2label.json""" _SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) _SCREAMING_SNAKE_CASE : List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} return config def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : str = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", 
F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) 
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", 
"""model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : str = dct.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = val def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _SCREAMING_SNAKE_CASE : Optional[Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) _SCREAMING_SNAKE_CASE : int = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[:dim, :] _SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[: dim] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE : int = in_proj_weight[ -dim :, : ] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-dim :] # fmt: on def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: # fmt: off _SCREAMING_SNAKE_CASE : List[str] = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : Any = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[: hidden_size, :] _SCREAMING_SNAKE_CASE : str = in_proj_bias[:config.hidden_size] _SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] _SCREAMING_SNAKE_CASE : List[str] = in_proj_bias[hidden_size : hidden_size * 2] _SCREAMING_SNAKE_CASE : str = in_proj_weight[-hidden_size :, :] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) 
_SCREAMING_SNAKE_CASE : str = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[: hidden_size, :] _SCREAMING_SNAKE_CASE : Dict = in_proj_bias[:config.hidden_size] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[hidden_size : hidden_size * 2] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-hidden_size :, :] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[-hidden_size :] # fmt: on def lowerCamelCase_()-> torch.Tensor: _SCREAMING_SNAKE_CASE : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False )-> int: _SCREAMING_SNAKE_CASE : List[Any] = get_maskformer_config(__SCREAMING_SNAKE_CASE ) # load original state_dict with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = pickle.load(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_swin_q_k_v(__SCREAMING_SNAKE_CASE , config.backbone_config ) read_in_decoder_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # update to torch tensors for key, value in state_dict.items(): _SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # load 🤗 model _SCREAMING_SNAKE_CASE : Tuple = MaskFormerForInstanceSegmentation(__SCREAMING_SNAKE_CASE ) model.eval() for name, param in model.named_parameters(): print(__SCREAMING_SNAKE_CASE , param.shape ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__SCREAMING_SNAKE_CASE ) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results _SCREAMING_SNAKE_CASE : str = prepare_img() if "vistas" in model_name: _SCREAMING_SNAKE_CASE : List[str] = 65 elif "cityscapes" in model_name: _SCREAMING_SNAKE_CASE : Dict = 65_535 else: _SCREAMING_SNAKE_CASE : Tuple = 255 _SCREAMING_SNAKE_CASE : str = True if """ade""" in model_name else False _SCREAMING_SNAKE_CASE : int = MaskFormerImageProcessor(ignore_index=__SCREAMING_SNAKE_CASE , reduce_labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _SCREAMING_SNAKE_CASE : int = torch.tensor( [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving 
model and image processor to {pytorch_dump_folder_path}""" ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F"""nielsr/{model_name}""" ) image_processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''maskformer-swin-tiny-ade''', type=str, help=('''Name of the MaskFormer model you\'d like to convert''',), ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''', type=str, help='''Path to the original state dict (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
635
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase_ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase_ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class _snake_case ( __snake_case ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["input_ids", "attention_mask"] a = BartTokenizer def __init__( self : Tuple , _A : List[str]=None , _A : List[Any]=None , _A : Optional[Any]=None , _A : Tuple="replace" , _A : Tuple="<s>" , _A : List[str]="</s>" , _A : Union[str, Any]="</s>" , _A : int="<s>" , _A : Tuple="<unk>" , _A : int="<pad>" , _A : Union[str, Any]="<mask>" , _A : Optional[int]=False , _A : Dict=True , **_A : List[str] , ): """simple docstring""" super().__init__( _A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , 
mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , ) _SCREAMING_SNAKE_CASE : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , _A) != add_prefix_space: _SCREAMING_SNAKE_CASE : Optional[int] = getattr(_A , pre_tok_state.pop("""type""")) _SCREAMING_SNAKE_CASE : Optional[Any] = add_prefix_space _SCREAMING_SNAKE_CASE : str = pre_tok_class(**_A) _SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _SCREAMING_SNAKE_CASE : Tuple = """post_processor""" _SCREAMING_SNAKE_CASE : List[str] = getattr(self.backend_tokenizer , _A , _A) if tokenizer_component_instance: _SCREAMING_SNAKE_CASE : Optional[int] = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _SCREAMING_SNAKE_CASE : Tuple = tuple(state["""sep"""]) if "cls" in state: _SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state["""cls"""]) _SCREAMING_SNAKE_CASE : Optional[Any] = False if state.get("""add_prefix_space""" , _A) != add_prefix_space: _SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space _SCREAMING_SNAKE_CASE : Dict = True if state.get("""trim_offsets""" , _A) != trim_offsets: _SCREAMING_SNAKE_CASE : Union[str, Any] = trim_offsets _SCREAMING_SNAKE_CASE : Optional[Any] = True if changes_to_apply: _SCREAMING_SNAKE_CASE : Any = getattr(_A , state.pop("""type""")) _SCREAMING_SNAKE_CASE : Tuple = component_class(**_A) setattr(self.backend_tokenizer , _A , _A) @property def _lowerCAmelCase ( self : int): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) @mask_token.setter def _lowerCAmelCase ( self : str , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else value _SCREAMING_SNAKE_CASE : str = value def _lowerCAmelCase ( self : int , *_A : Union[str, Any] , **_A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = kwargs.get("""is_split_into_words""" , _A) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""") return super()._batch_encode_plus(*_A , **_A) def _lowerCAmelCase ( self : Dict , *_A : Dict , **_A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.get("""is_split_into_words""" , _A) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""") return super()._encode_plus(*_A , **_A) def _lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[str] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(_A , name=_A) return tuple(_A) def _lowerCAmelCase ( self : List[str] , _A : Optional[int] , _A : Union[str, Any]=None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCAmelCase ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] 
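# BART does not use token type ids, so the mask built below is all zeros
# whether one or two sequences are passed.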
_SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
635
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _snake_case ( __snake_case ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] , _A : List[str]): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM _SCREAMING_SNAKE_CASE : Union[str, Any] = DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=_A , scheduler=_A) @torch.no_grad() def __call__( self : str , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : float = 0.0 , _A : int = 5_0 , _A : Optional[bool] = None , _A : Optional[str] = "pil" , _A : bool = True , ): """simple docstring""" if isinstance(self.unet.config.sample_size , _A): _SCREAMING_SNAKE_CASE : Optional[Any] = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: _SCREAMING_SNAKE_CASE : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(_A , _A) and len(_A) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_A)}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""") _SCREAMING_SNAKE_CASE : Tuple = randn_tensor(_A , generator=_A , device=self.device , dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(_A) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output _SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet(_A , _A).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _SCREAMING_SNAKE_CASE : str = self.scheduler.step( _A , _A , _A , eta=_A , use_clipped_model_output=_A , generator=_A).prev_sample _SCREAMING_SNAKE_CASE : List[str] = (image / 2 + 0.5).clamp(0 , 1) _SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _SCREAMING_SNAKE_CASE : List[Any] = self.numpy_to_pil(_A) if not return_dict: return (image,) return ImagePipelineOutput(images=_A)
635
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
635
1
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case : """simple docstring""" def __init__( self : Tuple , _A : str , _A : int=1_3 , _A : List[str]=3_0 , _A : Dict=2 , _A : Optional[Any]=3 , _A : Tuple=True , _A : Union[str, Any]=True , _A : Optional[int]=3_2 , _A : int=5 , _A : Union[str, Any]=4 , _A : List[Any]=3_7 , _A : Tuple="gelu" , _A : Optional[int]=0.1 , _A : Union[str, Any]=0.1 , _A : Union[str, Any]=1_0 , _A : int=0.02 , _A : int=None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = parent _SCREAMING_SNAKE_CASE : str = batch_size _SCREAMING_SNAKE_CASE : int = image_size _SCREAMING_SNAKE_CASE : Optional[Any] = patch_size _SCREAMING_SNAKE_CASE : Dict = num_channels _SCREAMING_SNAKE_CASE : int = is_training _SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Any = num_hidden_layers _SCREAMING_SNAKE_CASE : Tuple = num_attention_heads _SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size _SCREAMING_SNAKE_CASE : Optional[int] = hidden_act _SCREAMING_SNAKE_CASE : int = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size _SCREAMING_SNAKE_CASE : Optional[int] = initializer_range _SCREAMING_SNAKE_CASE : str = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _SCREAMING_SNAKE_CASE : Any = (image_size // patch_size) ** 2 _SCREAMING_SNAKE_CASE : List[str] = num_patches + 1 def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_labels: _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : List[Any] = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : str): """simple docstring""" return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self : Tuple , _A : List[str] , _A : Any , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = ViTMSNModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : List[Any] = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) 
def _lowerCAmelCase ( self : int , _A : str , _A : Optional[int] , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = self.type_sequence_label_size _SCREAMING_SNAKE_CASE : Any = ViTMSNForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = model(_A , labels=_A) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""") print("""Labels: {labels}""") self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _SCREAMING_SNAKE_CASE : int = 1 _SCREAMING_SNAKE_CASE : Any = ViTMSNForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : List[str] = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = config_and_inputs _SCREAMING_SNAKE_CASE : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () a = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) a = False a = False a = False a = False def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = ViTMSNModelTester(self) _SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""") def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" pass def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : List[str] = model_class(_A) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _SCREAMING_SNAKE_CASE : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear)) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_A) _SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A) @slow def _lowerCAmelCase ( self : Dict): """simple 
docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : List[Any] = ViTMSNModel.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCamelCase_()-> int: _SCREAMING_SNAKE_CASE : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : Dict): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""") if is_vision_available() else None @slow def _lowerCAmelCase ( self : List[Any]): """simple docstring""" torch.manual_seed(2) _SCREAMING_SNAKE_CASE : Any = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""").to(_A) _SCREAMING_SNAKE_CASE : Any = self.default_image_processor _SCREAMING_SNAKE_CASE : List[Any] = prepare_img() _SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = model(**_A) # verify the logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4))
635
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
1
"""simple docstring""" import os import pytest from attr import dataclass lowerCAmelCase_ = '''us-east-1''' # defaults region @dataclass class _snake_case : """simple docstring""" a = 42 a = "arn:aws:iam::558105141721:role/sagemaker_execution_role" a = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 5_00, "save_steps": 55_00, } a = {**hyperparameters, "max_steps": 10_00} @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return f"""{self.framework}-transfromers-test""" @property def _lowerCAmelCase ( self : List[Any]): """simple docstring""" return f"""./tests/sagemaker/scripts/{self.framework}""" @property def _lowerCAmelCase ( self : Dict): """simple docstring""" if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : int = SageMakerTestEnvironment(framework=request.cls.framework )
635
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
635
1
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : """simple docstring""" def __init__( self : List[str] , _A : Optional[Any] , _A : List[Any]=1_3 , _A : int=3_2 , _A : int=3 , _A : Tuple=4 , _A : str=[1_0, 2_0, 3_0, 4_0] , _A : Optional[int]=[2, 2, 3, 2] , _A : int=True , _A : int=True , _A : Tuple=3_7 , _A : List[str]="gelu" , _A : Optional[int]=1_0 , _A : Union[str, Any]=0.02 , _A : Dict=["stage2", "stage3", "stage4"] , _A : str=3 , _A : Dict=None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = parent _SCREAMING_SNAKE_CASE : int = batch_size _SCREAMING_SNAKE_CASE : int = image_size _SCREAMING_SNAKE_CASE : str = num_channels _SCREAMING_SNAKE_CASE : Union[str, Any] = num_stages _SCREAMING_SNAKE_CASE : Any = hidden_sizes _SCREAMING_SNAKE_CASE : Union[str, Any] = depths _SCREAMING_SNAKE_CASE : int = is_training _SCREAMING_SNAKE_CASE : int = use_labels _SCREAMING_SNAKE_CASE : Any = intermediate_size _SCREAMING_SNAKE_CASE : List[Any] = hidden_act _SCREAMING_SNAKE_CASE : int = type_sequence_label_size _SCREAMING_SNAKE_CASE : Dict = initializer_range _SCREAMING_SNAKE_CASE : str = out_features _SCREAMING_SNAKE_CASE : Any = num_labels _SCREAMING_SNAKE_CASE : Union[str, Any] = scope _SCREAMING_SNAKE_CASE : str = num_stages def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : List[str] = None if self.use_labels: _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : List[Any] = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : List[str]): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_A , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=_A , loss_ignore_index=2_5_5 , num_labels=self.num_labels , ) def _lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = UperNetForSemanticSegmentation(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = model(_A) self.parent.assertEqual( result.logits.shape , (self.batch_size, 
self.num_labels, self.image_size, self.image_size)) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) : Dict = config_and_inputs _SCREAMING_SNAKE_CASE : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = (UperNetForSemanticSegmentation,) if is_torch_available() else () a = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} a = False a = False a = False a = False a = False a = False def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = UperNetModelTester(self) _SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7) def _lowerCAmelCase ( self : List[str]): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCAmelCase ( self : List[Any]): """simple docstring""" return def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[int] = model_class(_A) _SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_A) @unittest.skip(reason="""UperNet does not use inputs_embeds""") def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" pass @unittest.skip(reason="""UperNet does not support input and output embeddings""") def _lowerCAmelCase ( self : List[Any]): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""") def _lowerCAmelCase ( self : str): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""") def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def _lowerCAmelCase ( self : List[str]): """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") def _lowerCAmelCase ( self : List[Any]): """simple docstring""" pass def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" def check_hidden_states_output(_A : List[Any] , _A : Any , _A : str): _SCREAMING_SNAKE_CASE : int = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : Any = 
outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.num_stages self.assertEqual(len(_A) , expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Union[str, Any] = True check_hidden_states_output(_A , _A , _A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : Dict = True check_hidden_states_output(_A , _A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Dict = _config_zero_init(_A) _SCREAMING_SNAKE_CASE : Optional[int] = _config_zero_init(configs_no_init.backbone_config) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[int] = model_class(config=_A) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason="""UperNet does not have tied weights""") def _lowerCAmelCase ( self : str): """simple docstring""" pass @slow def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : Any = UperNetForSemanticSegmentation.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCamelCase_()-> Dict: _SCREAMING_SNAKE_CASE : str = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _SCREAMING_SNAKE_CASE : List[Any] = Image.open(__SCREAMING_SNAKE_CASE ).convert("""RGB""" ) return image @require_torch @require_vision @slow class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""") _SCREAMING_SNAKE_CASE : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""").to(_A) _SCREAMING_SNAKE_CASE : Dict = prepare_img() _SCREAMING_SNAKE_CASE : List[Any] = processor(images=_A , return_tensors="""pt""").to(_A) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = model(**_A) _SCREAMING_SNAKE_CASE : str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4)) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""") _SCREAMING_SNAKE_CASE : int = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""").to(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img() _SCREAMING_SNAKE_CASE : Dict = processor(images=_A , 
return_tensors="""pt""").to(_A) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[Any] = model(**_A) _SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4))
635
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
635
1
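# The property at the end of the SEW config above reduces self.conv_stride
# with multiplication to get the overall downsampling factor of the feature
# encoder. A standalone sketch with the default stride tuple shown above:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
downsampling_factor = functools.reduce(operator.mul, conv_stride, 1)
assert downsampling_factor == 320  # 5 * 2**6: one output frame per 320 input samples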
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: def get_masked_lm_array(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" _SCREAMING_SNAKE_CASE : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: _SCREAMING_SNAKE_CASE : Dict = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" _SCREAMING_SNAKE_CASE : Any = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: _SCREAMING_SNAKE_CASE : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" _SCREAMING_SNAKE_CASE : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: _SCREAMING_SNAKE_CASE : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" _SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F"""Loading model based on config from {config_path}...""" ) _SCREAMING_SNAKE_CASE : int = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): _SCREAMING_SNAKE_CASE : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention _SCREAMING_SNAKE_CASE : BertSelfAttention = layer.attention.self _SCREAMING_SNAKE_CASE : Union[str, Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , """_query_dense/kernel""" , self_attn.query.weight.data.shape ) _SCREAMING_SNAKE_CASE : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , """_query_dense/bias""" , self_attn.query.bias.data.shape ) _SCREAMING_SNAKE_CASE : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , """_key_dense/kernel""" , self_attn.key.weight.data.shape ) _SCREAMING_SNAKE_CASE : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , """_key_dense/bias""" , self_attn.key.bias.data.shape ) _SCREAMING_SNAKE_CASE : Union[str, Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , """_value_dense/kernel""" , self_attn.value.weight.data.shape ) _SCREAMING_SNAKE_CASE : Any = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , """_value_dense/bias""" , self_attn.value.bias.data.shape ) # Self-attention Output 
        _SCREAMING_SNAKE_CASE : BertSelfOutput = layer.attention.output

        _SCREAMING_SNAKE_CASE : Tuple = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
        _SCREAMING_SNAKE_CASE : Tuple = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , """_output_dense/bias""" , self_output.dense.bias.data.shape )

        _SCREAMING_SNAKE_CASE : Dict = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_attention_layer_norm/gamma""" )
        _SCREAMING_SNAKE_CASE : str = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_attention_layer_norm/beta""" )

        # Intermediate
        _SCREAMING_SNAKE_CASE : BertIntermediate = layer.intermediate
        _SCREAMING_SNAKE_CASE : str = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_intermediate_dense/kernel""" )
        _SCREAMING_SNAKE_CASE : Union[str, Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_intermediate_dense/bias""" )

        # Output
        _SCREAMING_SNAKE_CASE : BertOutput = layer.output
        _SCREAMING_SNAKE_CASE : List[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_output_dense/kernel""" )
        _SCREAMING_SNAKE_CASE : List[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_output_dense/bias""" )
        _SCREAMING_SNAKE_CASE : Dict = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_output_layer_norm/gamma""" )
        _SCREAMING_SNAKE_CASE : str = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , """_output_layer_norm/beta""" )

    # Embeddings
    _SCREAMING_SNAKE_CASE : Optional[Any] = get_encoder_array("""_position_embedding_layer/embeddings""" )
    _SCREAMING_SNAKE_CASE : Tuple = get_encoder_array("""_type_embedding_layer/embeddings""" )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/gamma""" )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/beta""" )

    # LM Head
    _SCREAMING_SNAKE_CASE : List[Any] = model.cls.predictions.transform
    _SCREAMING_SNAKE_CASE : str = get_masked_lm_array("""dense/kernel""" )
    _SCREAMING_SNAKE_CASE : Any = get_masked_lm_array("""dense/bias""" )
    _SCREAMING_SNAKE_CASE : List[str] = get_masked_lm_array("""layer_norm/gamma""" )
    _SCREAMING_SNAKE_CASE : Optional[int] = get_masked_lm_array("""layer_norm/beta""" )
    _SCREAMING_SNAKE_CASE : Optional[int] = get_masked_lm_array("""embedding_table""" )

    # Pooling
    _SCREAMING_SNAKE_CASE : Union[str, Any] = BertPooler(config=__SCREAMING_SNAKE_CASE )
    _SCREAMING_SNAKE_CASE : BertPooler = get_encoder_array("""_pooler_layer/kernel""" )
    _SCREAMING_SNAKE_CASE : BertPooler = get_encoder_array("""_pooler_layer/bias""" )

    # Export final model
    model.save_pretrained(__SCREAMING_SNAKE_CASE )

    # Integration test - should load without any errors ;)
    _SCREAMING_SNAKE_CASE : int = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE )
    print(new_model.eval() )

    print("""Model conversion was done successfully!""" )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        type=str,
        required=True,
        help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''',
        type=str,
        required=True,
        help='''Path to the output PyTorch model.''',
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
635
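# The converter above transposes every TensorFlow "kernel" before loading it,
# because TF dense kernels are stored as (in_features, out_features) while
# torch.nn.Linear.weight is (out_features, in_features). A minimal sketch of
# that convention mismatch using random data (no checkpoint involved):
import numpy as np
import torch

tf_kernel = np.random.rand(4, 3).astype(np.float32)  # TF layout: (in, out)
linear = torch.nn.Linear(4, 3)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(tf_kernel.transpose()))  # torch layout: (out, in)

x = torch.randn(1, 4)
expected = torch.from_numpy(x.numpy() @ tf_kernel) + linear.bias
assert torch.allclose(linear(x), expected, atol=1e-6)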
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
1
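# The __init__ above only materialises the heavy model imports on first
# attribute access via _LazyModule. A simplified, library-free sketch of the
# same idea using PEP 562 module-level __getattr__ (an illustration of the
# pattern, not transformers' actual implementation):
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # module -> exported names
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(module_name), name)

# Direct self-test; in real use the lookup triggers on `from pkg import dumps`.
assert __getattr__("dumps") is importlib.import_module("json").dumps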
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = [ '''small''', '''small-base''', '''medium''', '''medium-base''', '''intermediate''', '''intermediate-base''', '''large''', '''large-base''', '''xlarge''', '''xlarge-base''', ] lowerCAmelCase_ = { '''vocab_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''', '''funnel-transformer/small-base''': ( '''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''', '''funnel-transformer/large-base''': ( '''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = {F"funnel-transformer/{name}": 512 for name in _model_names} lowerCAmelCase_ = {F"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names} class _snake_case ( __snake_case ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_INIT_CONFIGURATION a = FunnelTokenizer a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = 2 def __init__( self : int , _A : 
Union[str, Any]=None , _A : Any=None , _A : List[str]=True , _A : Optional[Any]="<unk>" , _A : Optional[int]="<sep>" , _A : int="<pad>" , _A : int="<cls>" , _A : Optional[Any]="<mask>" , _A : Tuple="<s>" , _A : Optional[int]="</s>" , _A : List[Any]=True , _A : Optional[Any]=True , _A : List[Any]=None , _A : List[Any]="##" , **_A : Optional[Any] , ): """simple docstring""" super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , bos_token=_A , eos_token=_A , clean_text=_A , tokenize_chinese_chars=_A , strip_accents=_A , wordpieces_prefix=_A , **_A , ) _SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("""lowercase""" , _A) != do_lower_case or normalizer_state.get("""strip_accents""" , _A) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , _A) != tokenize_chinese_chars ): _SCREAMING_SNAKE_CASE : List[str] = getattr(_A , normalizer_state.pop("""type""")) _SCREAMING_SNAKE_CASE : Tuple = do_lower_case _SCREAMING_SNAKE_CASE : Optional[Any] = strip_accents _SCREAMING_SNAKE_CASE : Any = tokenize_chinese_chars _SCREAMING_SNAKE_CASE : Tuple = normalizer_class(**_A) _SCREAMING_SNAKE_CASE : Dict = do_lower_case def _lowerCAmelCase ( self : int , _A : Optional[int] , _A : List[str]=None): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCAmelCase ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id] _SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _lowerCAmelCase ( self : Tuple , _A : str , _A : Optional[str] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self._tokenizer.model.save(_A , name=_A) return tuple(_A)
635
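# Funnel's tokenizer assigns the [CLS] token its own segment id
# (cls_token_type_id = 2 in the class above) instead of segment 0, which is
# what create_token_type_ids_from_sequences encodes. A plain-Python sketch
# with made-up token ids:
cls_token_type_id = 2
cls, sep = [101], [102]
token_ids_a = [7, 8, 9]
token_ids_b = [4, 5]

token_type_ids = (
    len(cls) * [cls_token_type_id]
    + len(token_ids_a + sep) * [0]
    + len(token_ids_b + sep) * [1]
)
assert token_type_ids == [2, 0, 0, 0, 0, 1, 1, 1]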
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
635
1
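# The tests above exercise ParquetDatasetReader/Writer through many parameter
# combinations; the underlying behaviour is a simple round-trip. A minimal
# sketch with the public Dataset API (assumes the `datasets` library is
# installed; the file name is arbitrary):
import tempfile

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.5, 1.5]})
with tempfile.TemporaryDirectory() as tmp_dir:
    parquet_path = f"{tmp_dir}/foo.parquet"
    ds.to_parquet(parquet_path)
    reloaded = Dataset.from_parquet(parquet_path)
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]
    assert reloaded.num_rows == 2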
"""simple docstring""" class _snake_case ( __snake_case ): """simple docstring""" pass class _snake_case ( __snake_case ): """simple docstring""" pass class _snake_case : """simple docstring""" def __init__( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : int = [ [], [], [], ] def _lowerCAmelCase ( self : int , _A : int , _A : int): """simple docstring""" try: if len(self.queues[priority]) >= 1_0_0: raise OverflowError("""Maximum queue size is 100""") self.queues[priority].append(_A) except IndexError: raise ValueError("""Valid priorities are 0, 1, and 2""") def _lowerCAmelCase ( self : List[Any]): """simple docstring""" for queue in self.queues: if queue: return queue.pop(0) raise UnderFlowError("""All queues are empty""") def __str__( self : Union[str, Any]): """simple docstring""" return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues)) class _snake_case : """simple docstring""" def __init__( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = [] def _lowerCAmelCase ( self : List[str] , _A : int): """simple docstring""" if len(self.queue) == 1_0_0: raise OverFlowError("""Maximum queue size is 100""") self.queue.append(_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" if not self.queue: raise UnderFlowError("""The queue is empty""") else: _SCREAMING_SNAKE_CASE : Tuple = min(self.queue) self.queue.remove(_A) return data def __str__( self : int): """simple docstring""" return str(self.queue) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(__SCREAMING_SNAKE_CASE ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(__SCREAMING_SNAKE_CASE ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def lowerCamelCase_()-> str: _SCREAMING_SNAKE_CASE : Tuple = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(__SCREAMING_SNAKE_CASE ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(__SCREAMING_SNAKE_CASE ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
635
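# The element priority queue above dequeues the smallest stored value first
# (min + remove, O(n) per dequeue). The standard library's heapq gives the
# same dequeue order in O(log n) per operation; a sketch for comparison,
# reusing the demo's values (not part of the original module):
import heapq

values = (10, 70, 100, 1, 5, 7, 4, 64, 128)
heap: list = []
for value in values:
    heapq.heappush(heap, value)

drained = [heapq.heappop(heap) for _ in range(len(heap))]
assert drained == sorted(values)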
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
635
1
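# A worked example of the digit-removal maximisation implemented above: for
# 1253, dropping one digit at a time yields 253, 153, 123 and 125, and the
# function's max(...) picks 253. A plain-Python check of that expectation:
candidates = []
num_string = str(abs(1253))
for index in range(len(num_string)):
    digits = list(num_string)
    digits.pop(index)
    candidates.append(int("".join(digits)))

assert candidates == [253, 153, 123, 125]
assert max(candidates) == 253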
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[Any]=1_3 , _A : Dict=3_2 , _A : int=2 , _A : Optional[Any]=3 , _A : Tuple=1_6 , _A : Tuple=[3_2, 6_4, 1_2_8] , _A : Optional[int]=[1, 2, 1] , _A : List[str]=[2, 2, 4] , _A : Optional[Any]=2 , _A : int=2.0 , _A : Optional[int]=True , _A : Optional[Any]=0.0 , _A : Tuple=0.0 , _A : Union[str, Any]=0.1 , _A : Optional[Any]="gelu" , _A : List[str]=False , _A : int=True , _A : List[str]=0.02 , _A : Tuple=1e-5 , _A : List[str]=True , _A : Optional[int]=None , _A : Dict=True , _A : Any=1_0 , _A : int=8 , _A : Tuple=["stage1", "stage2"] , _A : str=[1, 2] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = parent _SCREAMING_SNAKE_CASE : Any = batch_size _SCREAMING_SNAKE_CASE : Tuple = image_size _SCREAMING_SNAKE_CASE : int = patch_size _SCREAMING_SNAKE_CASE : List[Any] = num_channels _SCREAMING_SNAKE_CASE : List[str] = embed_dim _SCREAMING_SNAKE_CASE : Dict = hidden_sizes _SCREAMING_SNAKE_CASE : Dict = depths _SCREAMING_SNAKE_CASE : List[Any] = num_heads _SCREAMING_SNAKE_CASE : List[str] = window_size _SCREAMING_SNAKE_CASE : Any = mlp_ratio _SCREAMING_SNAKE_CASE : str = qkv_bias _SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob _SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Union[str, Any] = drop_path_rate _SCREAMING_SNAKE_CASE : List[str] = hidden_act _SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings _SCREAMING_SNAKE_CASE : List[Any] = patch_norm _SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps _SCREAMING_SNAKE_CASE : str = initializer_range _SCREAMING_SNAKE_CASE : Optional[Any] = is_training _SCREAMING_SNAKE_CASE : str = scope _SCREAMING_SNAKE_CASE : Optional[int] = use_labels _SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size _SCREAMING_SNAKE_CASE : int = encoder_stride _SCREAMING_SNAKE_CASE : List[str] = out_features _SCREAMING_SNAKE_CASE : List[str] = out_indices def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : Tuple = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : str): """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , 
depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCAmelCase ( self : List[Any] , _A : List[Any] , _A : Tuple , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = FocalNetModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = model(_A) _SCREAMING_SNAKE_CASE : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _SCREAMING_SNAKE_CASE : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCAmelCase ( self : str , _A : Optional[Any] , _A : Any , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = FocalNetBackbone(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1]) # verify backbone works with out_features=None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : str = FocalNetBackbone(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def _lowerCAmelCase ( self : Optional[int] , _A : Any , _A : Union[str, Any] , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = FocalNetForMaskedImageModeling(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images _SCREAMING_SNAKE_CASE : Dict = 1 _SCREAMING_SNAKE_CASE : Optional[Any] = FocalNetForMaskedImageModeling(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Optional[Any] = model(_A) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def _lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.type_sequence_label_size _SCREAMING_SNAKE_CASE : Optional[Any] = FocalNetForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : str = model(_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _SCREAMING_SNAKE_CASE : List[str] = 1 
_SCREAMING_SNAKE_CASE : List[str] = FocalNetForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Dict = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs _SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) a = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) a = False a = False a = False a = False a = False def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = FocalNetModelTester(self) _SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=_A , embed_dim=3_7 , has_text_modality=_A) def _lowerCAmelCase ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" return def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A) @unittest.skip(reason="""FocalNet does not use inputs_embeds""") def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""") def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" pass def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : List[Any] = model_class(_A) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _SCREAMING_SNAKE_CASE : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear)) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_A) _SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : int , _A : Any , _A : Any , _A : Dict , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : List[str] = outputs.hidden_states _SCREAMING_SNAKE_CASE : int = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1) self.assertEqual(len(_A) , _A) # FocalNet has a different seq_length _SCREAMING_SNAKE_CASE : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) _SCREAMING_SNAKE_CASE : str = outputs.reshaped_hidden_states self.assertEqual(len(_A) , _A) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = reshaped_hidden_states[0].shape _SCREAMING_SNAKE_CASE : Optional[Any] = ( reshaped_hidden_states[0].view(_A , _A , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : List[str] = True self.check_hidden_states_output(_A , _A , _A , _A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : Optional[Any] = True self.check_hidden_states_output(_A , _A , _A , _A) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Union[str, Any] = 3 _SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _SCREAMING_SNAKE_CASE : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _SCREAMING_SNAKE_CASE : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : List[Any] = True self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width)) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : Tuple = True self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width)) @slow def _lowerCAmelCase ( self : Dict): """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = FocalNetModel.from_pretrained(_A) self.assertIsNotNone(_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : str = _config_zero_init(_A) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : int = model_class(config=_A) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : Any): """simple docstring""" return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None @slow def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor _SCREAMING_SNAKE_CASE : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") _SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[Any] = model(**_A) # verify the logits _SCREAMING_SNAKE_CASE : int = torch.Size((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.2_166, -0.4_368, 0.2_191]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4)) self.assertTrue(outputs.logits.argmax(dim=-1).item() , 2_8_1) @require_torch class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = (FocalNetBackbone,) if is_torch_available() else () a = FocalNetConfig a = False def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = FocalNetModelTester(self)
635
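# The FocalNet shape checks above derive the expected hidden-state geometry
# from the config: patch embedding yields (image_size // patch_size) ** 2
# tokens, and each later stage quarters the token count while doubling the
# channel width. A numeric sketch with the tester's defaults (image_size=32,
# patch_size=2, embed_dim=16, len(depths)=3):
image_size, patch_size, embed_dim, num_stages = 32, 2, 16, 3

num_patches = (image_size // patch_size) ** 2              # 256 tokens after embedding
expected_seq_len = num_patches // (4 ** (num_stages - 1))  # 16 tokens at the last stage
expected_dim = embed_dim * 2 ** (num_stages - 1)           # 64 channels at the last stage

assert (num_patches, expected_seq_len, expected_dim) == (256, 16, 64)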
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
635
1
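# TextIteratorStreamer hands text from the generation thread to the consumer
# through a queue, which is what the threaded tests above rely on. A
# library-free sketch of that producer/consumer shape (the chunks are
# placeholder strings, not real model output):
from queue import Queue
from threading import Thread

text_queue: Queue = Queue()
_SENTINEL = None  # marks the end of generation, like the streamer's end signal

def produce():
    for chunk in ("Hello", " ", "world"):
        text_queue.put(chunk)
    text_queue.put(_SENTINEL)

thread = Thread(target=produce)
thread.start()

streamed_text = ""
while (chunk := text_queue.get()) is not _SENTINEL:
    streamed_text += chunk
thread.join()
assert streamed_text == "Hello world"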
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''', '''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''', '''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''', '''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''', '''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''', '''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''', '''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''', '''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''', '''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''', '''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''', } class _snake_case ( __snake_case ): """simple docstring""" a = "xlm" a = { "hidden_size": "emb_dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", "n_words": "vocab_size", # For backward compatibility } def __init__( self : Optional[Any] , _A : Tuple=3_0_1_4_5 , _A : int=2_0_4_8 , _A : int=1_2 , _A : Any=1_6 , _A : Dict=0.1 , _A : List[str]=0.1 , _A : Any=True , _A : Dict=False , _A : Dict=False , _A : List[Any]=False , _A : Tuple=1 , _A : List[Any]=True , _A : int=5_1_2 , _A : Optional[int]=2_0_4_8**-0.5 , _A : Union[str, Any]=1e-12 , _A : Tuple=0.02 , _A : Optional[Any]=0 , _A : int=1 , _A : Dict=2 , _A : Union[str, Any]=3 , _A : int=5 , _A : List[str]=True , _A : str="first" , _A : int=True , _A : str=None , _A : Optional[int]=True , _A : Any=0.1 , _A : List[Any]=5 , _A : int=5 , _A : Any=0 , _A : Dict=0 , _A : Union[str, Any]=2 , _A : Tuple=0 , **_A : Union[str, Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size _SCREAMING_SNAKE_CASE : Any = emb_dim _SCREAMING_SNAKE_CASE : int = n_layers _SCREAMING_SNAKE_CASE : str = n_heads _SCREAMING_SNAKE_CASE : Union[str, Any] = dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout _SCREAMING_SNAKE_CASE : Optional[int] = gelu_activation _SCREAMING_SNAKE_CASE : List[Any] = sinusoidal_embeddings _SCREAMING_SNAKE_CASE : List[str] = causal _SCREAMING_SNAKE_CASE : Optional[Any] = asm _SCREAMING_SNAKE_CASE : Optional[Any] = n_langs _SCREAMING_SNAKE_CASE : Optional[int] = use_lang_emb _SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps _SCREAMING_SNAKE_CASE : List[str] = bos_index _SCREAMING_SNAKE_CASE : Tuple = eos_index _SCREAMING_SNAKE_CASE : Any = pad_index _SCREAMING_SNAKE_CASE : Union[str, Any] = unk_index _SCREAMING_SNAKE_CASE : str = mask_index _SCREAMING_SNAKE_CASE : str = is_encoder _SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = embed_init_std _SCREAMING_SNAKE_CASE : List[str] = init_std _SCREAMING_SNAKE_CASE : Optional[Any] = summary_type _SCREAMING_SNAKE_CASE : Optional[int] = summary_use_proj _SCREAMING_SNAKE_CASE : List[str] = summary_activation _SCREAMING_SNAKE_CASE : List[Any] = summary_proj_to_labels _SCREAMING_SNAKE_CASE : List[Any] = summary_first_dropout _SCREAMING_SNAKE_CASE : Any = 
start_n_top _SCREAMING_SNAKE_CASE : Optional[int] = end_n_top _SCREAMING_SNAKE_CASE : int = mask_token_id _SCREAMING_SNAKE_CASE : str = lang_id if "n_words" in kwargs: _SCREAMING_SNAKE_CASE : int = kwargs["""n_words"""] super().__init__(pad_token_id=_A , bos_token_id=_A , **_A) class _snake_case ( __snake_case ): """simple docstring""" @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" if self.task == "multiple-choice": _SCREAMING_SNAKE_CASE : int = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _SCREAMING_SNAKE_CASE : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ])
635
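# The OnnxConfig subclass above just maps each model input to its dynamic
# axes, switching axis names when the task is multiple-choice. A standalone
# sketch of that mapping (the `task` value is a made-up stand-in for
# self.task):
from collections import OrderedDict

task = "multiple-choice"
dynamic_axis = (
    {0: "batch", 1: "choice", 2: "sequence"}
    if task == "multiple-choice"
    else {0: "batch", 1: "sequence"}
)
onnx_inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
        ("token_type_ids", dynamic_axis),
    ]
)
assert list(onnx_inputs) == ["input_ids", "attention_mask", "token_type_ids"]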
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
1
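# The tool above scores each candidate label by the entailment logit of a
# (text, "This example is {label}") NLI pair and returns the argmax. A
# torch-only sketch of the decode step with fabricated logits (three labels,
# entailment column 2, as in the class above):
import torch

labels = ["positive", "negative", "neutral"]
logits = torch.tensor(  # one row per (text, label) pair; columns are NLI classes
    [
        [0.1, 0.2, 1.5],
        [0.9, 0.3, 0.2],
        [0.2, 0.5, 0.7],
    ]
)

label_id = torch.argmax(logits[:, 2]).item()  # highest entailment score wins
assert labels[label_id] == "positive"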
"""simple docstring""" import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = MobileBertTokenizer a = MobileBertTokenizerFast a = True a = True a = filter_non_english a = "google/mobilebert-uncased" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" super().setUp() _SCREAMING_SNAKE_CASE : Tuple = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) _SCREAMING_SNAKE_CASE : Any = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def _lowerCAmelCase ( self : Tuple , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE : Any = """unwanted, running""" return input_text, output_text def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.tokenizer_class(self.vocab_file) _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("""UNwant\u00E9d,running""") self.assertListEqual(_A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , [9, 6, 7, 1_2, 1_0, 1_1]) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" if not self.test_rust_tokenizer: return _SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() _SCREAMING_SNAKE_CASE : Optional[int] = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE : int = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(_A) _SCREAMING_SNAKE_CASE : Dict = rust_tokenizer.tokenize(_A) self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.encode(_A , add_special_tokens=_A) self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(_A) _SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.encode(_A) self.assertListEqual(_A , _A) # With lower casing _SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(do_lower_case=_A) _SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer(do_lower_case=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = """UNwant\u00E9d,running""" _SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize(_A) _SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.tokenize(_A) self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.encode(_A , add_special_tokens=_A) self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE : int = tokenizer.encode(_A) 
_SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.encode(_A) self.assertListEqual(_A , _A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""]) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""]) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A , strip_accents=_A) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : str = BasicTokenizer(do_lower_case=_A) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = BasicTokenizer(do_lower_case=_A) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A , strip_accents=_A) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , never_split=["""[UNK]"""]) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""]) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] _SCREAMING_SNAKE_CASE : str = {} for i, token in enumerate(_A): _SCREAMING_SNAKE_CASE : Dict = i _SCREAMING_SNAKE_CASE : Optional[int] = WordpieceTokenizer(vocab=_A , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""]) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" self.assertTrue(_is_whitespace(""" """)) self.assertTrue(_is_whitespace("""\t""")) self.assertTrue(_is_whitespace("""\r""")) self.assertTrue(_is_whitespace("""\n""")) self.assertTrue(_is_whitespace("""\u00A0""")) self.assertFalse(_is_whitespace("""A""")) self.assertFalse(_is_whitespace("""-""")) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" self.assertTrue(_is_control("""\u0005""")) self.assertFalse(_is_control("""A""")) self.assertFalse(_is_control(""" """)) self.assertFalse(_is_control("""\t""")) self.assertFalse(_is_control("""\r""")) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" self.assertTrue(_is_punctuation("""-""")) self.assertTrue(_is_punctuation("""$""")) self.assertTrue(_is_punctuation("""`""")) self.assertTrue(_is_punctuation(""".""")) self.assertFalse(_is_punctuation("""A""")) self.assertFalse(_is_punctuation(""" """)) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() _SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) self.assertListEqual( [rust_tokenizer.tokenize(_A) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) @slow def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""") _SCREAMING_SNAKE_CASE : str = tokenizer.encode("""sequence builders""" , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A) _SCREAMING_SNAKE_CASE : int = tokenizer.build_inputs_with_special_tokens(_A , _A) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def _lowerCAmelCase ( self : List[str]): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""): _SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(_A , **_A) _SCREAMING_SNAKE_CASE : Optional[Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" _SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , ) _SCREAMING_SNAKE_CASE : int = 
tokenizer_r.do_lower_case if hasattr(_A , """do_lower_case""") else False _SCREAMING_SNAKE_CASE : Any = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), """Allen"""), ((2_1, 2_3), """##NL"""), ((2_3, 2_4), """##P"""), ((2_5, 3_3), """sentence"""), ((3_3, 3_4), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), """allen"""), ((2_1, 2_3), """##nl"""), ((2_3, 2_4), """##p"""), ((2_5, 3_3), """sentence"""), ((3_3, 3_4), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""])) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""]) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = ["""的""", """人""", """有"""] _SCREAMING_SNAKE_CASE : Any = """""".join(_A) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""): _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(_A , **_A) _SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(_A , **_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer_p.encode(_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : int = tokenizer_r.convert_ids_to_tokens(_A) _SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(_A) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A) self.assertListEqual(_A , _A) _SCREAMING_SNAKE_CASE : Any = False _SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A) _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(_A , **_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.encode(_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode(_A , add_special_tokens=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.convert_ids_to_tokens(_A) # it is expected that only the first Chinese character is not preceded by "##". _SCREAMING_SNAKE_CASE : Union[str, Any] = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(_A) ] self.assertListEqual(_A , _A) self.assertListEqual(_A , _A)
635
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
635
1
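The WordPiece behaviour the tests above exercise (greedy longest-match against the vocab, with a whole word falling back to the unknown token) can be sketched in a few lines; the function below is illustrative, not the transformers implementation:

# Illustrative greedy WordPiece (not the library's implementation).
def wordpiece_tokenize(text, vocab, unk_token="[UNK]"):
    output = []
    for word in text.split():
        start, pieces = 0, []
        while start < len(word):
            end, piece = len(word), None
            while start < end:
                candidate = ("##" if start > 0 else "") + word[start:end]
                if candidate in vocab:
                    piece = candidate
                    break
                end -= 1
            if piece is None:  # no prefix of the remainder is in the vocab
                pieces = [unk_token]
                break
            pieces.append(piece)
            start = end
        output.extend(pieces)
    return output


vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted running", vocab) == ["un", "##want", "##ed", "runn", "##ing"]
assert wordpiece_tokenize("unwantedX running", vocab) == ["[UNK]", "runn", "##ing"]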
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
635
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
1
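The pooling step in the forward pass above, isolated for a quick shape check (toy tensors; names are illustrative):

import torch


def masked_mean_pool(embs, attention_mask):
    # embs: (batch, seq, dim); attention_mask: (batch, seq) with 1 for real tokens.
    summed = (embs * attention_mask.unsqueeze(2)).sum(dim=1)
    return summed / attention_mask.sum(dim=1)[:, None]


embs = torch.ones(2, 4, 8)
mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]])
print(masked_mean_pool(embs, mask).shape)  # torch.Size([2, 8])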
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : Any = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=__SCREAMING_SNAKE_CASE , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=__SCREAMING_SNAKE_CASE , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=__SCREAMING_SNAKE_CASE ) return parser.parse_args() def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = parse_args() # Import training_script as a module. _SCREAMING_SNAKE_CASE : Optional[Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) _SCREAMING_SNAKE_CASE : List[str] = script_fpath.stem _SCREAMING_SNAKE_CASE : Optional[Any] = importlib.import_module(__SCREAMING_SNAKE_CASE ) # Patch sys.argv _SCREAMING_SNAKE_CASE : Optional[Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
635
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
1
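The constants in the pi function above come straight from the Chudnovsky series:

\frac{1}{\pi} \;=\; 12 \sum_{k=0}^{\infty} \frac{(-1)^{k}\,(6k)!\,\bigl(13\,591\,409 + 545\,140\,134\,k\bigr)}{(3k)!\,(k!)^{3}\,640320^{\,3k + 3/2}}

Each loop iteration updates the factorial ratio, the linear term (step 545140134) and the power of -640320^3 = -262537412640768000, while the prefactor 426880\sqrt{10005} equals 640320^{3/2}/12.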
"""simple docstring""" import argparse import os import re import packaging.version lowerCAmelCase_ = '''examples/''' lowerCAmelCase_ = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } lowerCAmelCase_ = { '''init''': '''src/diffusers/__init__.py''', '''setup''': '''setup.py''', } lowerCAmelCase_ = '''README.md''' def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _SCREAMING_SNAKE_CASE : Tuple = f.read() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = REPLACE_PATTERNS[pattern] _SCREAMING_SNAKE_CASE : str = replace.replace("""VERSION""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[Any] = re_pattern.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: for folder, directories, fnames in os.walk(__SCREAMING_SNAKE_CASE ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , pattern="""examples""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not patch: update_version_in_examples(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : Dict = """🤗 Transformers currently provides the following architectures""" _SCREAMING_SNAKE_CASE : List[Any] = """1. Want to contribute a new model?""" with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _SCREAMING_SNAKE_CASE : List[str] = f.readlines() # Find the start of the list. _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _SCREAMING_SNAKE_CASE : List[Any] = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): _SCREAMING_SNAKE_CASE : Union[str, Any] = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> int: with open(REPLACE_FILES["""init"""] , """r""" ) as f: _SCREAMING_SNAKE_CASE : Optional[Any] = f.read() _SCREAMING_SNAKE_CASE : Any = REPLACE_PATTERNS["""init"""][0].search(__SCREAMING_SNAKE_CASE ).groups()[0] return packaging.version.parse(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE=False )-> Any: _SCREAMING_SNAKE_CASE : str = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: _SCREAMING_SNAKE_CASE : str = default_version.base_version elif patch: _SCREAMING_SNAKE_CASE : List[str] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _SCREAMING_SNAKE_CASE : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. _SCREAMING_SNAKE_CASE : Dict = input(F"""Which version are you releasing? [{default_version}]""" ) if len(__SCREAMING_SNAKE_CASE ) == 0: _SCREAMING_SNAKE_CASE : int = default_version print(F"""Updating version to {version}.""" ) global_version_update(__SCREAMING_SNAKE_CASE , patch=__SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = get_version() _SCREAMING_SNAKE_CASE : str = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _SCREAMING_SNAKE_CASE : Tuple = current_version.base_version # Check with the user we got that right. _SCREAMING_SNAKE_CASE : List[Any] = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(__SCREAMING_SNAKE_CASE ) == 0: _SCREAMING_SNAKE_CASE : Optional[int] = dev_version print(F"""Updating version to {version}.""" ) global_version_update(__SCREAMING_SNAKE_CASE ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') lowerCAmelCase_ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
635
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
1
"""simple docstring""" from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : int = 1 _SCREAMING_SNAKE_CASE : Any = True for v in tree[start]: if v not in visited: ret += dfs(__SCREAMING_SNAKE_CASE ) if ret % 2 == 0: cuts.append(__SCREAMING_SNAKE_CASE ) return ret def lowerCamelCase_()-> Dict: dfs(1 ) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 10, 9 lowerCAmelCase_ = defaultdict(list) lowerCAmelCase_ = {} lowerCAmelCase_ = [] lowerCAmelCase_ = 0 lowerCAmelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
635
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
1
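For a Hermitian matrix the Rayleigh quotient computed above is real and bounded by the extreme eigenvalues (the min-max theorem), which is what makes it useful for eigenvalue estimation:

\lambda_{\min}(M) \;\le\; R(M, v) = \frac{v^{*} M v}{v^{*} v} \;\le\; \lambda_{\max}(M)

with equality exactly when v is an eigenvector for the corresponding extreme eigenvalue.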
"""simple docstring""" lowerCAmelCase_ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> list[str]: _SCREAMING_SNAKE_CASE : List[str] = set() # keep track of all the paths to be checked _SCREAMING_SNAKE_CASE : Dict = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _SCREAMING_SNAKE_CASE : Tuple = queue.pop(0 ) # get the last node from the path _SCREAMING_SNAKE_CASE : List[str] = path[-1] if node not in explored: _SCREAMING_SNAKE_CASE : List[Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _SCREAMING_SNAKE_CASE : str = list(__SCREAMING_SNAKE_CASE ) new_path.append(__SCREAMING_SNAKE_CASE ) queue.append(__SCREAMING_SNAKE_CASE ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(__SCREAMING_SNAKE_CASE ) # in case there's no path between the 2 nodes return [] def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _SCREAMING_SNAKE_CASE : List[Any] = [start] _SCREAMING_SNAKE_CASE : Optional[Any] = set(__SCREAMING_SNAKE_CASE ) # Keep tab on distances from `start` node. _SCREAMING_SNAKE_CASE : Optional[Any] = {start: 0, target: -1} while queue: _SCREAMING_SNAKE_CASE : Optional[Any] = queue.pop(0 ) if node == target: _SCREAMING_SNAKE_CASE : List[str] = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(__SCREAMING_SNAKE_CASE ) queue.append(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
635
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
1
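The carrier-concentration function above solves the mass-action law n · p = n_i² for whichever quantity is passed as zero. A quick worked check with made-up numbers (n_i ≈ 1.5e10 cm⁻³ is the textbook value for silicon at 300 K):

n_i = 1.5e10    # intrinsic carrier concentration (cm^-3)
p = 1.0e16      # hole concentration (cm^-3)
n = n_i**2 / p  # electron concentration implied by n * p = n_i**2
print(f"electron_conc = {n:.3e} cm^-3")  # 2.250e+04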
"""simple docstring""" import numpy as np import datasets lowerCAmelCase_ = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' lowerCAmelCase_ = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' lowerCAmelCase_ = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): """simple docstring""" def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""") , id="""X"""), }) , ) def _lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = np.array(_A) _SCREAMING_SNAKE_CASE : Dict = np.array(_A) # Assert that arrays are 2D if len(X.shape) != 2: raise ValueError("""Expected `X` to be a 2D vector""") if len(reference_distribution.shape) != 2: raise ValueError("""Expected `reference_distribution` to be a 2D vector""") if reference_distribution.shape[0] < 2: raise ValueError( """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""") # Get mahalanobis distance for each prediction _SCREAMING_SNAKE_CASE : Tuple = X - np.mean(_A) _SCREAMING_SNAKE_CASE : List[str] = np.cov(reference_distribution.T) try: _SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(_A) except np.linalg.LinAlgError: _SCREAMING_SNAKE_CASE : Union[str, Any] = np.linalg.pinv(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(_A , _A) _SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(_A , X_minus_mu.T).diagonal() return {"mahalanobis": mahal_dist}
635
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
635
1
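A compact restatement of the metric's computation above, using the usual per-feature mean (the docstring example returns 0.5, i.e. the squared Mahalanobis distance):

import numpy as np


def mahalanobis(X, reference):
    # Squared Mahalanobis distance d^2 = (x - mu) S^+ (x - mu)^T per row of X;
    # the pseudo-inverse tolerates a singular covariance, as in the metric above.
    delta = X - reference.mean(axis=0)
    cov_pinv = np.linalg.pinv(np.cov(reference.T))
    return np.einsum("ij,jk,ik->i", delta, cov_pinv, delta)


print(mahalanobis(np.array([[0.0, 1.0]]), np.array([[0.0, 1.0], [1.0, 0.0]])))  # [0.5]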
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
1
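The _LazyModule used by the __init__ files above defers heavy imports until an attribute is first accessed. The same idea can be sketched with a PEP 562 module-level __getattr__ placed in a package __init__.py (illustrative, not the transformers implementation):

import importlib

_import_structure = {"processing_clipseg": ["CLIPSegProcessor"]}


def __getattr__(name):
    # Resolve the attribute lazily from the submodule that defines it.
    for submodule, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")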
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> tuple: return (data["data"], data["target"]) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> np.ndarray: _SCREAMING_SNAKE_CASE : int = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Predict target for test data _SCREAMING_SNAKE_CASE : List[str] = xgb.predict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[Any] = predictions.reshape(len(__SCREAMING_SNAKE_CASE ) , 1 ) return predictions def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Tuple = fetch_california_housing() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = data_handling(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = train_test_split( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , test_size=0.25 , random_state=1 ) _SCREAMING_SNAKE_CASE : Tuple = xgboost(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Error printing print(F"""Mean Absolute Error : {mean_absolute_error(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}""" ) print(F"""Mean Square Error : {mean_squared_error(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
635
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
1
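The per-pixel double loop in process() above can also be done with one NumPy fancy-indexing step; a sketch of the equivalent vectorised resize:

import numpy as np


def nearest_neighbour_resize(img, dst_w, dst_h):
    # Destination pixel (i, j) samples source pixel (int(i * ratio_y), int(j * ratio_x)).
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]


img = np.arange(2 * 2 * 3, dtype=np.uint8).reshape(2, 2, 3)
print(nearest_neighbour_resize(img, 4, 4).shape)  # (4, 4, 3)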
"""simple docstring""" import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename lowerCAmelCase_ = '''http://www.mocksite.com/file1.txt''' lowerCAmelCase_ = '''"text": ["foo", "foo"]''' lowerCAmelCase_ = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8''' class _snake_case : """simple docstring""" a = 2_00 a = {"Content-Length": "100"} a = {} def _lowerCAmelCase ( self : str , **_A : str): """simple docstring""" return [bytes(_A , """utf-8""")] def lowerCamelCase_(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )-> Union[str, Any]: return MockResponse() @pytest.mark.parametrize("""urls_type""" , [str, list, dict] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: import requests monkeypatch.setattr(__SCREAMING_SNAKE_CASE , """request""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = URL if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = url elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = [url] elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""train""": url} _SCREAMING_SNAKE_CASE : str = """dummy""" _SCREAMING_SNAKE_CASE : Optional[Any] = """downloads""" _SCREAMING_SNAKE_CASE : int = tmp_path _SCREAMING_SNAKE_CASE : Optional[Any] = DownloadConfig( cache_dir=os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , use_etag=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Any = DownloadManager(dataset_name=__SCREAMING_SNAKE_CASE , download_config=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = dl_manager.download(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = urls for downloaded_paths in [downloaded_paths]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = [downloaded_paths] _SCREAMING_SNAKE_CASE : str = [urls] elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert "train" in downloaded_paths.keys() _SCREAMING_SNAKE_CASE : Tuple = downloaded_paths.values() _SCREAMING_SNAKE_CASE : Any = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert downloaded_path == dl_manager.downloaded_paths[input_url] _SCREAMING_SNAKE_CASE : List[str] = Path(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() _SCREAMING_SNAKE_CASE : Union[str, Any] = downloaded_path.read_text() assert content == CONTENT _SCREAMING_SNAKE_CASE : Tuple = downloaded_path.with_suffix(""".json""" ) assert metadata_downloaded_path.exists() _SCREAMING_SNAKE_CASE : Tuple = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize("""paths_type""" , [str, list, dict] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : List[str] = str(__SCREAMING_SNAKE_CASE ) if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = filename elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = 
[filename] elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = {"""train""": filename} _SCREAMING_SNAKE_CASE : Optional[Any] = """dummy""" _SCREAMING_SNAKE_CASE : Any = xz_file.parent _SCREAMING_SNAKE_CASE : Optional[Any] = """extracted""" _SCREAMING_SNAKE_CASE : Dict = DownloadConfig( cache_dir=__SCREAMING_SNAKE_CASE , use_etag=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = DownloadManager(dataset_name=__SCREAMING_SNAKE_CASE , download_config=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = dl_manager.extract(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = paths for extracted_paths in [extracted_paths]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = [extracted_paths] _SCREAMING_SNAKE_CASE : str = [paths] elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert "train" in extracted_paths.keys() _SCREAMING_SNAKE_CASE : Optional[Any] = extracted_paths.values() _SCREAMING_SNAKE_CASE : int = paths.values() assert extracted_paths for extracted_path, input_path in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert extracted_path == dl_manager.extracted_paths[input_path] _SCREAMING_SNAKE_CASE : List[str] = Path(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = extracted_path.parts assert parts[-1] == hash_url_to_filename(__SCREAMING_SNAKE_CASE , etag=__SCREAMING_SNAKE_CASE ) assert parts[-2] == extracted_subdir assert extracted_path.exists() _SCREAMING_SNAKE_CASE : Union[str, Any] = extracted_path.read_text() _SCREAMING_SNAKE_CASE : Any = text_file.read_text() assert extracted_file_content == expected_file_content def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: assert path.endswith(""".jsonl""" ) for num_items, line in enumerate(__SCREAMING_SNAKE_CASE , start=1 ): _SCREAMING_SNAKE_CASE : List[Any] = json.loads(line.decode("""utf-8""" ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Optional[int] = request.getfixturevalue(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__SCREAMING_SNAKE_CASE ) , start=1 ): _test_jsonl(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert num_jsonl == 2 @pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : Any = request.getfixturevalue(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__SCREAMING_SNAKE_CASE ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__SCREAMING_SNAKE_CASE ) , start=1 ): _test_jsonl(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert num_tar == 1 assert num_jsonl == 2 def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[Any] = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) , start=1 ): assert os.path.basename(__SCREAMING_SNAKE_CASE ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
635
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
1
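The download-manager tests in the row above assert that each cached file is named by a hash of its source URL. A minimal sketch of that naming scheme, assuming a plain SHA-256 digest with an optional etag suffix (the helper name here is hypothetical; the row imports the real one as datasets.utils.file_utils.hash_url_to_filename):

import hashlib
from typing import Optional

def url_to_cache_filename(url: str, etag: Optional[str] = None) -> str:
    # Hash the URL so arbitrary strings map to safe, fixed-length filenames.
    filename = hashlib.sha256(url.encode("utf-8")).hexdigest()
    if etag is not None:
        # A changed etag yields a new name, so a modified remote file is re-downloaded.
        filename += "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    return filename

print(url_to_cache_filename("http://www.mocksite.com/file1.txt"))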
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _snake_case ( __snake_case ): """simple docstring""" def __init__( self : List[str] , _A : WhisperForConditionalGeneration , _A : WhisperProcessor , _A : AutoencoderKL , _A : CLIPTextModel , _A : CLIPTokenizer , _A : UNetaDConditionModel , _A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _A : StableDiffusionSafetyChecker , _A : CLIPImageProcessor , ): """simple docstring""" super().__init__() if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""") self.register_modules( speech_model=_A , speech_processor=_A , vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , ) def _lowerCAmelCase ( self : Any , _A : Optional[Union[str, int]] = "auto"): """simple docstring""" if slice_size == "auto": _SCREAMING_SNAKE_CASE : Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" self.enable_attention_slicing(_A) @torch.no_grad() def __call__( self : str , _A : str , _A : str=1_6_0_0_0 , _A : int = 5_1_2 , _A : int = 5_1_2 , _A : int = 5_0 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Optional[int] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = self.speech_processor.feature_extractor( _A , return_tensors="""pt""" , sampling_rate=_A).input_features.to(self.device) _SCREAMING_SNAKE_CASE : Tuple = self.speech_model.generate(_A , max_length=4_8_0_0_0_0) _SCREAMING_SNAKE_CASE : List[str] = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A)[ 0 ] if isinstance(_A , _A): _SCREAMING_SNAKE_CASE : str = 1 elif isinstance(_A , _A): _SCREAMING_SNAKE_CASE : str = len(_A) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A)}""") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_A 
, _A) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(_A)}.""") # get prompt text embeddings _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer( _A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Tuple = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""") _SCREAMING_SNAKE_CASE : int = text_input_ids[:, : self.tokenizer.model_max_length] _SCREAMING_SNAKE_CASE : int = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = text_embeddings.shape _SCREAMING_SNAKE_CASE : Optional[Any] = text_embeddings.repeat(1 , _A , 1) _SCREAMING_SNAKE_CASE : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _SCREAMING_SNAKE_CASE : Optional[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _SCREAMING_SNAKE_CASE : List[str] if negative_prompt is None: _SCREAMING_SNAKE_CASE : Tuple = [""""""] * batch_size elif type(_A) is not type(_A): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A)} !=""" f""" {type(_A)}.""") elif isinstance(_A , _A): _SCREAMING_SNAKE_CASE : Optional[int] = [negative_prompt] elif batch_size != len(_A): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(_A)}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""") else: _SCREAMING_SNAKE_CASE : Union[str, Any] = negative_prompt _SCREAMING_SNAKE_CASE : List[str] = text_input_ids.shape[-1] _SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer( _A , padding="""max_length""" , max_length=_A , truncation=_A , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Any = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _SCREAMING_SNAKE_CASE : Dict = uncond_embeddings.shape[1] _SCREAMING_SNAKE_CASE : Dict = uncond_embeddings.repeat(1 , _A , 1) _SCREAMING_SNAKE_CASE : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_SCREAMING_SNAKE_CASE : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) _SCREAMING_SNAKE_CASE : int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps _SCREAMING_SNAKE_CASE : Dict = torch.randn(_A , generator=_A , device="""cpu""" , dtype=_A).to( self.device) else: _SCREAMING_SNAKE_CASE : Dict = torch.randn(_A , generator=_A , device=self.device , dtype=_A) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""") _SCREAMING_SNAKE_CASE : str = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(_A) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand _SCREAMING_SNAKE_CASE : int = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler _SCREAMING_SNAKE_CASE : int = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _SCREAMING_SNAKE_CASE : Any = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys()) _SCREAMING_SNAKE_CASE : Dict = {} if accepts_eta: _SCREAMING_SNAKE_CASE : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(_A)): # expand the latents if we are doing classifier free guidance _SCREAMING_SNAKE_CASE : str = torch.cat([latents] * 2) if do_classifier_free_guidance else latents _SCREAMING_SNAKE_CASE : Dict = self.scheduler.scale_model_input(_A , _A) # predict the noise residual _SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(_A , _A , encoder_hidden_states=_A).sample # perform guidance if do_classifier_free_guidance: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = noise_pred.chunk(2) _SCREAMING_SNAKE_CASE : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 _SCREAMING_SNAKE_CASE : Any = self.scheduler.step(_A , _A , _A , **_A).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_A , _A , _A) _SCREAMING_SNAKE_CASE : Tuple = 1 / 0.18_215 * latents _SCREAMING_SNAKE_CASE : Optional[Any] = self.vae.decode(_A).sample _SCREAMING_SNAKE_CASE : int = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": _SCREAMING_SNAKE_CASE : List[str] = self.numpy_to_pil(_A) if not return_dict: return image return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A)
635
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
635
1
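The speech-to-image pipeline in the row above runs the UNet on a doubled batch (unconditional plus text-conditioned) and then mixes the two noise predictions. That mixing step in isolation, with illustrative tensor shapes rather than the pipeline's real ones:

import torch

def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks the unconditional and text-conditioned predictions on dim 0.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # guidance_scale > 1 pushes the sample toward the text-conditioned direction.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

guided = apply_classifier_free_guidance(torch.randn(2, 4, 64, 64), guidance_scale=7.5)
print(guided.shape)  # torch.Size([1, 4, 64, 64])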
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[List[ImageInput]]: if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__SCREAMING_SNAKE_CASE ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class _snake_case ( __snake_case ): """simple docstring""" a = ["pixel_values"] def __init__( self : Optional[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 2_5_5 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Dict , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : Any = size if size is not None else {"""shortest_edge""": 2_2_4} _SCREAMING_SNAKE_CASE : int = get_size_dict(_A , default_to_square=_A) _SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4} _SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(_A , param_name="""crop_size""") _SCREAMING_SNAKE_CASE : Optional[Any] = do_resize _SCREAMING_SNAKE_CASE : Any = size _SCREAMING_SNAKE_CASE : str = do_center_crop _SCREAMING_SNAKE_CASE : Optional[int] = crop_size _SCREAMING_SNAKE_CASE : Union[str, Any] = resample _SCREAMING_SNAKE_CASE : str = do_rescale _SCREAMING_SNAKE_CASE : Dict = rescale_factor _SCREAMING_SNAKE_CASE : List[str] = do_normalize _SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _SCREAMING_SNAKE_CASE : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self : Tuple , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(_A , default_to_square=_A) if "shortest_edge" in size: _SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(_A , size["""shortest_edge"""] , default_to_square=_A) elif "height" in size and "width" in size: _SCREAMING_SNAKE_CASE : str = (size["""height"""], size["""width"""]) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""") return resize(_A , size=_A , resample=_A , data_format=_A , **_A) def _lowerCAmelCase ( self : Tuple , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(_A) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""") return center_crop(_A , size=(size["""height"""], size["""width"""]) , data_format=_A , **_A) def _lowerCAmelCase ( self : Optional[int] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ): """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A) def _lowerCAmelCase ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ): """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A) def _lowerCAmelCase ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_SCREAMING_SNAKE_CASE : Tuple = to_numpy_array(_A) if do_resize: _SCREAMING_SNAKE_CASE : Any = self.resize(image=_A , size=_A , resample=_A) if do_center_crop: _SCREAMING_SNAKE_CASE : Optional[Any] = self.center_crop(_A , size=_A) if do_rescale: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.rescale(image=_A , scale=_A) if do_normalize: _SCREAMING_SNAKE_CASE : int = self.normalize(image=_A , mean=_A , std=_A) _SCREAMING_SNAKE_CASE : str = to_channel_dimension_format(_A , _A) return image def _lowerCAmelCase ( self : str , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : List[Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = do_resize if do_resize is not None else self.do_resize _SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample _SCREAMING_SNAKE_CASE : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop _SCREAMING_SNAKE_CASE : Any = do_rescale if do_rescale is not None else self.do_rescale _SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize _SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean _SCREAMING_SNAKE_CASE : str = image_std if image_std is not None else self.image_std _SCREAMING_SNAKE_CASE : List[str] = size if size is not None else self.size _SCREAMING_SNAKE_CASE : int = get_size_dict(_A , default_to_square=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else self.crop_size _SCREAMING_SNAKE_CASE : List[str] = get_size_dict(_A , param_name="""crop_size""") if not valid_images(_A): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") _SCREAMING_SNAKE_CASE : Dict = make_batched(_A) _SCREAMING_SNAKE_CASE : Dict = [ [ self._preprocess_image( image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , ) for img in video ] for video in videos ] _SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": videos} return BatchFeature(data=_A , tensor_type=_A)
635
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
1
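The video processor in the row above first coerces its input into a batch of videos, each a list of frames, before any resizing or normalization. The same coercion sketched with numpy arrays standing in for frames (the real code also accepts PIL images and torch tensors via is_valid_image):

import numpy as np

def make_batched(videos):
    # Already a batch of videos: a list of lists of frames.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos
    # A single video: a list of frames, wrapped once.
    if isinstance(videos, (list, tuple)):
        return [videos]
    # A single frame, wrapped twice.
    if isinstance(videos, np.ndarray):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos!r}")

frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(make_batched(frame)) == 1
assert len(make_batched([frame, frame])[0]) == 2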
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
1
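The RAG evaluation script in the row above scores each prediction against every acceptable reference answer and keeps the best score, so a prediction counts as correct if it matches any of them. Stripped to its core with a toy exact-match metric (the real script imports exact_match_score and fa_score from utils_rag):

from typing import Callable, List

def exact_match_score(prediction: str, ground_truth: str) -> float:
    # Toy metric: case-insensitive string equality.
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def metric_max_over_ground_truths(metric_fn: Callable, prediction: str, ground_truths: List[str]) -> float:
    # Keep the best score over all acceptable answers.
    return max(metric_fn(prediction, gt) for gt in ground_truths)

score = metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Paris, France"])
print(score)  # 1.0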
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
635
1
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class _snake_case ( __snake_case ): """simple docstring""" a = CLIPConfig a = ["CLIPEncoderLayer"] def __init__( self : Optional[Any] , _A : CLIPConfig): """simple docstring""" super().__init__(_A) _SCREAMING_SNAKE_CASE : Tuple = CLIPVisionModelWithProjection(config.vision_config) _SCREAMING_SNAKE_CASE : Any = nn.Linear(config.vision_config.projection_dim , 1) _SCREAMING_SNAKE_CASE : int = nn.Linear(config.vision_config.projection_dim , 1) @torch.no_grad() def _lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : Any=0.5 , _A : List[str]=0.5): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.vision_model(_A)[0] _SCREAMING_SNAKE_CASE : int = self.p_head(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = nsfw_detected.flatten() _SCREAMING_SNAKE_CASE : Optional[Any] = nsfw_detected > p_threshold _SCREAMING_SNAKE_CASE : Union[str, Any] = nsfw_detected.tolist() if any(_A): logger.warning( """Potential NSFW content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""") for idx, nsfw_detected_ in enumerate(_A): if nsfw_detected_: _SCREAMING_SNAKE_CASE : str = np.zeros(images[idx].shape) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.w_head(_A) _SCREAMING_SNAKE_CASE : Optional[int] = watermark_detected.flatten() _SCREAMING_SNAKE_CASE : Optional[int] = watermark_detected > w_threshold _SCREAMING_SNAKE_CASE : str = watermark_detected.tolist() if any(_A): logger.warning( """Potential watermarked content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""") for idx, watermark_detected_ in enumerate(_A): if watermark_detected_: _SCREAMING_SNAKE_CASE : List[Any] = np.zeros(images[idx].shape) return images, nsfw_detected, watermark_detected
635
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
1