Dataset schema:

    code                     string   length 87 – 55.2k
    code_codestyle           int64    0 – 349
    style_context            string   length 135 – 49.1k
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
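A minimal consumption sketch for rows with this schema, using the datasets library. Hedged: the preview does not name the repository, so the identifier below is a placeholder.

from datasets import load_dataset

# "user/code-style-pairs" is a placeholder; substitute the real Hub identifier.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
# Each row pairs a code sample with a style context plus integer style ids and a label.
print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])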
code:

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    BartForSequenceClassification,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    TapexTokenizer,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the
    # corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files. CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label2id = {"Refused": 0, "Entailed": 1}
    id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Convert a '#'-delimited table string into a pandas DataFrame (first row = header)."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
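# Illustrative invocation (hedged): the module filename and the output path are
# placeholders, and microsoft/tapex-base is one public TAPEX checkpoint compatible
# with BartForSequenceClassification; the flags themselves come from the dataclasses
# and TrainingArguments defined above.
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact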
code_codestyle: 308
style_context:

import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
style_context_codestyle: 308
label: 1
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
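# Usage sketch: with the lazy module installed in sys.modules, the tokenizer is only
# imported on first attribute access, e.g.
#
#   from transformers import BartphoTokenizer
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#
# "vinai/bartpho-syllable" is the public BARTpho checkpoint; sentencepiece must be
# installed, otherwise _import_structure stays empty and the import fails.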
code_codestyle: 308
style_context:

from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Explore a state-space tree depth-first: at each index, the element is
    first excluded from and then included in `current_subsequence`; every leaf
    prints one subsequence."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip sequence[index]
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include sequence[index]
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
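# Worked example: for the sequence [1, 2] the tree is explored depth-first, each index
# being skipped before it is included, so the printed subsequences are:
#   []
#   [2]
#   [1]
#   [1, 2]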
style_context_codestyle: 308
label: 1
code:

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
code_codestyle: 308
style_context:

import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of one waveform, clamped and rescaled."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
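# Usage sketch (hedged, using the defaults defined above): a clip is padded or trimmed
# to n_samples = 30 s * 16 kHz = 480_000 samples, i.e. 480_000 / 160 = 3_000 frames of
# 80 mel bins each.
#
#   extractor = WhisperFeatureExtractor()
#   audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   feats = extractor(audio, sampling_rate=16_000, return_tensors="np")
#   assert feats["input_features"].shape == (1, 80, 3_000)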
style_context_codestyle: 308
label: 1
code:

import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sol1 import PokerHand

SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_types(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
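# The suite is meant to be collected by pytest from inside the problem package, next to
# the PokerHand implementation (sol1.py) and the poker_hands.txt data file, e.g.:
#
#   python -m pytest test_poker_hand.py -q
#
# (the test module filename is illustrative; any pytest-discoverable name works)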
code_codestyle: 308
style_context:

import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    # XLM has 2 QA models -> need to manually set the correct labels for one of them here
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president ... (repeated)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
style_context_codestyle: 308
label: 1
code:

from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract log-mel filterbank features for one (unbatched) waveform vector."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
code_codestyle: 308
style_context:

def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with black unit
    squares and coloured tiles of lengths two, three and four."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
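# Worked example of the accumulation above:
#   ways_number[5] = 1 + (ways[3] + ways[2] + ways[1] + ways[0])  # length-2 tiles
#                      + (ways[2] + ways[1] + ways[0])            # length-3 tiles
#                      + (ways[1] + ways[0])                      # length-4 tiles
#                  = 1 + 8 + 4 + 2 = 15,
# reproducing the tetranacci-style sequence 1, 1, 2, 4, 8, 15, ...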
style_context_codestyle: 308
label: 1
code:

import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
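# Illustrative invocation (the script filename and local paths are placeholders; the
# flags are exactly the ones defined by the argument parser above):
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-for-qa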
import os


def solution(filename: str = "input.txt") -> int:
    """Project Euler problem 82: find the minimal path sum from any cell of the
    left column to any cell of the right column, moving up, down and right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # First pass: enter column j from the left.
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # Second pass: relax downward moves within column j.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # Third pass: relax upward moves within column j.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
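# A hedged usage sketch (my addition, not part of the original file): the 5x5
# example grid from the Project Euler 82 statement has minimal path sum 994.
# solution() joins its argument onto this file's directory, but os.path.join
# discards the directory when the second component is absolute, so a temporary
# file path can be passed directly.
if __name__ == "__main__":
    import tempfile

    example = (
        "131,673,234,103,18\n"
        "201,96,342,965,150\n"
        "630,803,746,422,111\n"
        "537,699,497,121,956\n"
        "805,732,524,37,331\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write(example)
    print(solution(tmp.name))  # expected: 994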
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format='%(message)s') def snake_case( __magic_name__ ) -> np.ndarray: '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> np.ndarray: '''simple docstring''' lowercase : Tuple = np.nan for i in range(__magic_name__ ): lowercase : int = features[:, labels == i] lowercase : List[str] = data.mean(1 ) # Centralize the data of class i lowercase : Tuple = data - column_reshape(__magic_name__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(__magic_name__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase : Any = np.dot(__magic_name__ , centered_data.T ) return covariance_sum / features.shape[1] def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> np.ndarray: '''simple docstring''' lowercase : Tuple = features.mean(1 ) lowercase : Tuple = np.nan for i in range(__magic_name__ ): lowercase : str = features[:, labels == i] lowercase : int = data.shape[1] lowercase : Tuple = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(__magic_name__ ) - column_reshape(__magic_name__ ) , (column_reshape(__magic_name__ ) - column_reshape(__magic_name__ )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase : List[Any] = device_data * np.dot( column_reshape(__magic_name__ ) - column_reshape(__magic_name__ ) , (column_reshape(__magic_name__ ) - column_reshape(__magic_name__ )).T , ) return covariance_sum / features.shape[1] def snake_case( __magic_name__ , __magic_name__ ) -> np.ndarray: '''simple docstring''' if features.any(): lowercase : Union[str, Any] = features.mean(1 ) # Center the dataset lowercase : Any = features - np.reshape(__magic_name__ , (data_mean.size, 1) ) lowercase : Tuple = np.dot(__magic_name__ , centered_data.T ) / features.shape[1] lowercase , lowercase : Optional[Any] = np.linalg.eigh(__magic_name__ ) # Take all the columns in the reverse order (-1), and then takes only the first lowercase : Any = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowercase : Tuple = np.dot(filtered_eigenvectors.T , __magic_name__ ) logging.info('''Principal Component Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__magic_name__ ) logging.error('''Dataset empty''' ) raise AssertionError def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> np.ndarray: '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: lowercase , lowercase : List[str] = eigh( covariance_between_classes(__magic_name__ , __magic_name__ , __magic_name__ ) , covariance_within_classes(__magic_name__ , __magic_name__ , __magic_name__ ) , ) lowercase : Union[str, Any] = eigenvectors[:, ::-1][:, :dimensions] lowercase , lowercase , lowercase : int = np.linalg.svd(__magic_name__ ) lowercase : Tuple = svd_matrix[:, 0:dimensions] lowercase : Any = np.dot(filtered_svd_matrix.T , __magic_name__ ) logging.info('''Linear Discriminant Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__magic_name__ ) logging.error('''Dataset empty''' ) raise AssertionError def snake_case( ) -> None: '''simple docstring''' lowercase : Optional[Any] = 
np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowercase : Optional[int] = np.array([0, 0, 0, 1, 1] ) lowercase : str = 2 lowercase : List[str] = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(__magic_name__ ) as error_info: lowercase : str = linear_discriminant_analysis( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if isinstance(__magic_name__ , np.ndarray ): raise AssertionError( '''Did not raise AssertionError for dimensions > classes''' ) assert error_info.type is AssertionError def snake_case( ) -> None: '''simple docstring''' lowercase : Optional[int] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowercase : Union[str, Any] = 2 lowercase : Union[str, Any] = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(__magic_name__ ) as error_info: lowercase : Dict = principal_component_analysis(__magic_name__ , __magic_name__ ) if not np.allclose(__magic_name__ , __magic_name__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
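# A hedged usage sketch (my addition): projecting a toy dataset of three
# features observed on five samples onto its first two principal components
# with the PCA routine above (referenced elsewhere in this file as
# principal_component_analysis).
if __name__ == "__main__":
    toy_features = np.array(
        [[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 4.0, 5.0, 6.0], [3.0, 4.0, 5.0, 6.0, 7.0]]
    )
    print(principal_component_analysis(toy_features, 2).shape)  # expected: (2, 5)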
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ) lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' ) lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids lowercase : List[Any] = model(_A , labels=_A ).loss lowercase : Dict = -tf.math.reduce_mean(_A ).numpy() lowercase : Union[str, Any] = -21.228_168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake lowerCAmelCase_ = numpy.array([0, 0]) lowerCAmelCase_ = numpy.array([0.5, 0.8_6_6_0_2_5_4]) lowerCAmelCase_ = numpy.array([1, 0]) lowerCAmelCase_ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def snake_case( __magic_name__ , __magic_name__ ) -> list[numpy.ndarray]: '''simple docstring''' lowercase : List[Any] = initial_vectors for _ in range(__magic_name__ ): lowercase : int = iteration_step(__magic_name__ ) return vectors def snake_case( __magic_name__ ) -> list[numpy.ndarray]: '''simple docstring''' lowercase : List[Any] = [] for i, start_vector in enumerate(vectors[:-1] ): lowercase : List[str] = vectors[i + 1] new_vectors.append(__magic_name__ ) lowercase : Any = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def snake_case( __magic_name__ , __magic_name__ ) -> numpy.ndarray: '''simple docstring''' lowercase : str = numpy.radians(__magic_name__ ) lowercase , lowercase : Optional[Any] = numpy.cos(__magic_name__ ), numpy.sin(__magic_name__ ) lowercase : Union[str, Any] = numpy.array(((c, -s), (s, c)) ) return numpy.dot(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ ) -> None: '''simple docstring''' lowercase : Optional[int] = plt.gca() axes.set_aspect('''equal''' ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() lowercase , lowercase : Optional[Any] = zip(*__magic_name__ ) plt.plot(__magic_name__ , __magic_name__ ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_ = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
from heapq import heappop, heappush import numpy as np def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]: '''simple docstring''' lowercase , lowercase : Optional[int] = grid.shape lowercase : Optional[int] = [-1, 1, 0, 0] lowercase : List[str] = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] lowercase , lowercase : Union[str, Any] = [(0, source)], set() lowercase : List[str] = np.full((rows, cols) , np.inf ) lowercase : Dict = 0 lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ ) lowercase : Any = None while queue: ((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: lowercase : Tuple = [] while (x, y) != source: path.append((x, y) ) lowercase , lowercase : Optional[int] = predecessors[x, y] path.append(__magic_name__ ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(__magic_name__ ) ): lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: lowercase : List[Any] = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(__magic_name__ , (dist + 1, (nx, ny)) ) lowercase : int = dist + 1 lowercase : Optional[Any] = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
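# A hedged usage sketch (my addition): calling the grid Dijkstra above — named
# snake_case in this dump; upstream it is presumably dijkstra — on a fully open
# 3x3 grid of 1s, 4-connected, from (0, 0) to (2, 2).
if __name__ == "__main__":
    open_grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    distance, path = snake_case(open_grid, (0, 0), (2, 2), False)
    print(distance)  # expected: 4.0
    print(path)      # expected: a five-cell path from (0, 0) to (2, 2)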
from math import factorial


def solution(num: int = 100) -> int:
    """Project Euler problem 20: return the sum of the digits in num!."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
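# Quick check (my addition): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27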
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ = { 'configuration_mask2former': [ 'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Mask2FormerConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['Mask2FormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
lowerCAmelCase_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' lowercase : Tuple = [False] * len(__magic_name__ ) lowercase : Union[str, Any] = [s] lowercase : List[str] = True while queue: lowercase : Any = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__magic_name__ ) lowercase : int = True lowercase : Union[str, Any] = u return visited[t] def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[int] = [-1] * (len(__magic_name__ )) lowercase : List[Any] = 0 lowercase : Union[str, Any] = [] lowercase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowercase : Union[str, Any] = float('''Inf''' ) lowercase : Optional[int] = sink while s != source: # Find the minimum value in select path lowercase : Any = min(__magic_name__ , graph[parent[s]][s] ) lowercase : str = parent[s] max_flow += path_flow lowercase : Dict = sink while v != source: lowercase : List[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : str = parent[v] for i in range(len(__magic_name__ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
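# Note (my addition): mincut() rewrites the graph it receives into the residual
# network, so a second call needs a fresh deep copy of test_graph. For the
# classic CLRS network above, the expected cut is [(1, 3), (4, 3), (4, 5)];
# its capacity 12 + 7 + 4 = 23 equals the maximum s-t flow, as the
# max-flow/min-cut theorem guarantees.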
def sum_of_digits(n: int) -> int:
    """Return the sum of the digits of n, computed iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the digits of n, computed recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the digits of n via its string representation."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations with ints of increasing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowerCAmelCase_ = logging.get_logger(__name__) class _A : def __init__( self : Optional[Any] , _A : Any , _A : Union[str, Any] ) -> Any: """simple docstring""" lowercase : Optional[Any] = question_encoder lowercase : Union[str, Any] = generator lowercase : Union[str, Any] = self.question_encoder def __a ( self : str , _A : Optional[int] ) -> Any: """simple docstring""" if os.path.isfile(_A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(_A , exist_ok=_A ) lowercase : Any = os.path.join(_A , '''question_encoder_tokenizer''' ) lowercase : int = os.path.join(_A , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(_A ) self.generator.save_pretrained(_A ) @classmethod def __a ( cls : Any , _A : Any , **_A : Any ) -> int: """simple docstring""" from ..auto.tokenization_auto import AutoTokenizer lowercase : Dict = kwargs.pop('''config''' , _A ) if config is None: lowercase : List[str] = RagConfig.from_pretrained(_A ) lowercase : List[Any] = AutoTokenizer.from_pretrained( _A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) lowercase : List[Any] = AutoTokenizer.from_pretrained( _A , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=_A , generator=_A ) def __call__( self : Optional[Any] , *_A : int , **_A : List[Any] ) -> Union[str, Any]: """simple docstring""" return self.current_tokenizer(*_A , **_A ) def __a ( self : List[str] , *_A : int , **_A : Optional[Any] ) -> List[str]: """simple docstring""" return self.generator.batch_decode(*_A , **_A ) def __a ( self : List[Any] , *_A : Tuple , **_A : Optional[Any] ) -> Optional[int]: """simple docstring""" return self.generator.decode(*_A , **_A ) def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : str = self.question_encoder def __a ( self : Any ) -> Any: """simple docstring""" lowercase : int = self.generator def __a ( self : Tuple , _A : List[str] , _A : Optional[List[str]] = None , _A : Optional[int] = None , _A : Optional[int] = None , _A : str = "longest" , _A : str = None , _A : bool = True , **_A : str , ) -> BatchEncoding: """simple docstring""" warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , _A , ) if max_length is None: lowercase : Any = self.current_tokenizer.model_max_length lowercase : Any = self( _A , add_special_tokens=_A , return_tensors=_A , max_length=_A , padding=_A , truncation=_A , **_A , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowercase : Any = self.current_tokenizer.model_max_length lowercase : str = self( text_target=_A , add_special_tokens=_A , return_tensors=_A , padding=_A , max_length=_A , truncation=_A , **_A , ) lowercase : List[Any] = labels['''input_ids'''] return model_inputs
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def snake_case( ) -> List[str]: '''simple docstring''' lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ ) lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=__magic_name__ ) env_command_parser(subparsers=__magic_name__ ) launch_command_parser(subparsers=__magic_name__ ) tpu_command_parser(subparsers=__magic_name__ ) test_command_parser(subparsers=__magic_name__ ) # Let's go lowercase : Dict = parser.parse_args() if not hasattr(__magic_name__ , '''func''' ): parser.print_help() exit(1 ) # Run args.func(__magic_name__ ) if __name__ == "__main__": main()
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _A ( _lowerCamelCase ): _UpperCamelCase : Tuple = ['''image_processor''', '''tokenizer'''] _UpperCamelCase : List[str] = '''AutoImageProcessor''' _UpperCamelCase : Optional[int] = '''AutoTokenizer''' def __init__( self : int , _A : Tuple , _A : str ) -> Union[str, Any]: """simple docstring""" super().__init__(_A , _A ) lowercase : str = self.image_processor def __call__( self : Dict , _A : Any=None , _A : List[Any]=None , _A : List[str]=None , **_A : Tuple ) -> Dict: """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: lowercase : str = self.tokenizer(_A , return_tensors=_A , **_A ) if images is not None: lowercase : List[Any] = self.image_processor(_A , return_tensors=_A , **_A ) if text is not None and images is not None: lowercase : Any = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A ) , tensor_type=_A ) def __a ( self : int , *_A : Dict , **_A : Tuple ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A ) def __a ( self : str , *_A : Optional[Any] , **_A : List[str] ) -> str: """simple docstring""" return self.tokenizer.decode(*_A , **_A ) @property def __a ( self : Dict ) -> str: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]: '''simple docstring''' lowercase : List[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowercase : Optional[int] = '''''' else: lowercase : List[Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : Tuple = in_proj_weight[ : config.hidden_size, : ] lowercase : str = in_proj_bias[: config.hidden_size] lowercase : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : Dict = in_proj_bias[ config.hidden_size : 
config.hidden_size * 2 ] lowercase : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase : Optional[int] = in_proj_bias[-config.hidden_size :] def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : str = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : Any = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : List[Any] = dct.pop(__magic_name__ ) lowercase : Union[str, Any] = val def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = ViTMSNConfig() lowercase : str = 10_00 lowercase : List[str] = '''datasets/huggingface/label-files''' lowercase : List[str] = '''imagenet-1k-id2label.json''' lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) ) lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Any = idalabel lowercase : List[Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowercase : int = 3_84 lowercase : Optional[Any] = 15_36 lowercase : Tuple = 6 elif "l16" in checkpoint_url: lowercase : Union[str, Any] = 10_24 lowercase : List[str] = 40_96 lowercase : int = 24 lowercase : Union[str, Any] = 16 lowercase : Tuple = 0.1 elif "b4" in checkpoint_url: lowercase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowercase : Dict = 7 lowercase : List[Any] = 10_24 lowercase : str = 40_96 lowercase : int = 24 lowercase : Dict = 16 lowercase : Tuple = 0.1 lowercase : int = ViTMSNModel(__magic_name__ ) lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder'''] lowercase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(__magic_name__ ) lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ ) for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) lowercase : Dict = ViTImageProcessor( size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ ) lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) lowercase : int = model(**__magic_name__ ) lowercase : Optional[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowercase : List[str] 
= torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__magic_name__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'} lowerCAmelCase_ = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, } lowerCAmelCase_ = { 'moussaKam/mbarthez': 10_24, 'moussaKam/barthez': 10_24, 'moussaKam/barthez-orangesum-title': 10_24, } lowerCAmelCase_ = '▁' class _A ( _lowerCamelCase ): _UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES _UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : int = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , _A : Optional[int] , _A : List[Any]="<s>" , _A : List[str]="</s>" , _A : int="</s>" , _A : Any="<s>" , _A : Optional[int]="<unk>" , _A : Tuple="<pad>" , _A : Tuple="<mask>" , _A : Optional[Dict[str, Any]] = None , **_A : int , ) -> None: """simple docstring""" lowercase : Union[str, Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token lowercase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) lowercase : int = vocab_file lowercase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) lowercase : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase : Optional[int] = len(self.sp_model ) - 1 lowercase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __a ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase : Optional[Any] = [self.cls_token_id] lowercase : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1] def __a ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase : str = [self.sep_token_id] lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : int ) -> Tuple: """simple docstring""" return len(self.sp_model ) def __a ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __a ( self : Any , _A : str ) -> 
List[str]: """simple docstring""" return self.sp_model.encode(_A , out_type=_A ) def __a ( self : Optional[int] , _A : Optional[Any] ) -> Tuple: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase : Any = self.sp_model.PieceToId(_A ) return spm_id if spm_id else self.unk_token_id def __a ( self : Optional[Any] , _A : Optional[int] ) -> Optional[Any]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(_A ) def __a ( self : List[str] , _A : str ) -> Dict: """simple docstring""" lowercase : Union[str, Any] = [] lowercase : Any = '''''' lowercase : Optional[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_A ) + token lowercase : str = True lowercase : int = [] else: current_sub_tokens.append(_A ) lowercase : List[str] = False out_string += self.sp_model.decode(_A ) return out_string.strip() def __getstate__( self : Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase : Any = self.__dict__.copy() lowercase : Union[str, Any] = None return state def __setstate__( self : List[Any] , _A : Dict ) -> int: """simple docstring""" lowercase : str = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : List[str] = {} lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __a ( self : int , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase : Optional[Any] = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , '''wb''' ) as fi: lowercase : List[Any] = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler') class _A : def __init__( self : str , _A : str , _A : Dict , _A : bool = True , _A : bool = False ) -> Dict: """simple docstring""" lowercase : Optional[int] = scheduler lowercase : List[str] = optimizers if isinstance(_A , (list, tuple) ) else [optimizers] lowercase : Tuple = split_batches lowercase : Optional[int] = step_with_optimizer lowercase : Tuple = GradientState() def __a ( self : Union[str, Any] , *_A : Union[str, Any] , **_A : Optional[Any] ) -> Optional[int]: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*_A , **_A ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*_A , **_A ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step lowercase : Tuple = AcceleratorState().num_processes for _ in range(_A ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , '''total_steps''' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*_A , **_A ) else: self.scheduler.step(*_A , **_A ) def __a ( self : str ) -> Optional[int]: """simple docstring""" return self.scheduler.get_last_lr() def __a ( self : str ) -> str: """simple docstring""" return self.scheduler.state_dict() def __a ( self : Dict , _A : Dict ) -> str: """simple docstring""" self.scheduler.load_state_dict(_A ) def __a ( self : Dict ) -> List[str]: """simple docstring""" return self.scheduler.get_lr() def __a ( self : List[Any] , *_A : List[Any] , **_A : str ) -> int: """simple docstring""" return self.scheduler.print_lr(*_A , **_A )
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _A ( _lowerCamelCase ): def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = tokenizer lowercase : List[Any] = tokenizer.bos_token_id lowercase : Union[str, Any] = dataset lowercase : Union[str, Any] = seq_length lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences def __iter__( self : int ) -> int: """simple docstring""" lowercase : Dict = iter(self.dataset ) lowercase : Union[str, Any] = True while more_examples: lowercase , lowercase : Tuple = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(_A )['''content'''] ) buffer_len += len(buffer[-1] ) except StopIteration: lowercase : List[str] = False break lowercase : str = tokenizer(_A , truncation=_A )['''input_ids'''] lowercase : List[str] = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(_A ) , self.seq_length ): lowercase : int = all_token_ids[i : i + self.seq_length] if len(_A ) == self.seq_length: yield torch.tensor(_A ) def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : List[str] = {'''streaming''': True} lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ ) lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length ) lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size ) return eval_dataloader def snake_case( __magic_name__ ) -> str: '''simple docstring''' model.eval() lowercase : str = [] for step, batch in enumerate(__magic_name__ ): with torch.no_grad(): lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ ) lowercase : List[Any] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__magic_name__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) ) try: lowercase : Tuple = torch.exp(__magic_name__ ) except OverflowError: lowercase : List[str] = float('''inf''' ) return loss.item(), perplexity.item() # Setup Accelerator lowerCAmelCase_ = Accelerator() # Parse configuration lowerCAmelCase_ = HfArgumentParser(EvaluationArguments) lowerCAmelCase_ = parser.parse_args() set_seed(args.seed) # Logging lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowerCAmelCase_ = create_dataloader(args) # Prepare everything with our `accelerator`. lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args) logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
def snake_case( __magic_name__ = "The quick brown fox jumps over the lazy dog" , ) -> bool: '''simple docstring''' lowercase : List[str] = set() # Replace all the whitespace in our sentence lowercase : str = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(__magic_name__ ) == 26 def snake_case( __magic_name__ = "The quick brown fox jumps over the lazy dog" , ) -> bool: '''simple docstring''' lowercase : Optional[int] = [False] * 26 for char in input_str: if char.islower(): lowercase : Tuple = True elif char.isupper(): lowercase : Any = True return all(__magic_name__ ) def snake_case( __magic_name__ = "The quick brown fox jumps over the lazy dog" , ) -> bool: '''simple docstring''' return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def snake_case( ) -> None: '''simple docstring''' from timeit import timeit lowercase : List[str] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=__magic_name__ ) ) print(timeit('''is_pangram_faster()''' , setup=__magic_name__ ) ) print(timeit('''is_pangram_fastest()''' , setup=__magic_name__ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> Optional[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> int: '''simple docstring''' lowercase : List[str] = '''mock-s3-bucket''' lowercase : Optional[int] = F"""s3://{mock_bucket}""" lowercase : List[Any] = extract_path_from_uri(__magic_name__ ) assert dataset_path.startswith('''s3://''' ) is False lowercase : Optional[int] = '''./local/path''' lowercase : Dict = extract_path_from_uri(__magic_name__ ) assert dataset_path == new_dataset_path def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Tuple = is_remote_filesystem(__magic_name__ ) assert is_remote is True lowercase : int = fsspec.filesystem('''file''' ) lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} lowercase : List[Any] = input_paths[compression_fs_class.protocol] if input_path is None: lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__magic_name__ ) lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ ) assert isinstance(__magic_name__ , __magic_name__ ) lowercase : List[Any] = os.path.basename(__magic_name__ ) lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} lowercase : List[str] = compressed_file_paths[protocol] lowercase : str = '''dataset.jsonl''' lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}""" lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ ) assert fs.isfile(__magic_name__ ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ ) lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert 
hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(__magic_name__ ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def snake_case( ) -> List[Any]: '''simple docstring''' lowercase : List[Any] = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ ) with pytest.warns(__magic_name__ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(__magic_name__ ) == 1 assert ( str(warning_info[0].message ) == F"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
def text_justification(word: str, max_width: int) -> list:
    """Fully justify the given text so that every line is exactly max_width
    characters wide; the last line is left-justified."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # If there is only one word on the line, pad the remainder with spaces.
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : how many spaces to insert
            # after the word at line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # Distribute the leftover spaces round-robin to the leftmost gaps.
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                aligned_words_list.append(line[i])  # add the word
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            aligned_words_list.append(line[-1])  # last word takes no trailing gap
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # Keep adding words while they fit; len(line) counts the single
            # spaces that must separate the words already on the line.
            line.append(word)
            width += len(word)
        else:
            # Justify the full line, then start a new one with the current word.
            answer.append(justify(line, width, max_width))
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
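# A hedged usage sketch (my addition): the classic "This is an example of text
# justification." input at max_width=16, the worked example from LeetCode 68.
if __name__ == "__main__":
    for row in text_justification("This is an example of text justification.", 16):
        print(repr(row))
    # expected:
    # 'This    is    an'
    # 'example  of text'
    # 'justification.  '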
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) class _A ( enum.Enum ): _UpperCamelCase : Union[str, Any] = 0 _UpperCamelCase : Any = 1 @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[Any] = '''generated''' def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]: """simple docstring""" super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]: """simple docstring""" lowercase : str = {} if truncation is not None: lowercase : Tuple = truncation lowercase : Tuple = generate_kwargs lowercase : Optional[Any] = {} if return_tensors is not None and return_type is None: lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: lowercase : Dict = return_type if clean_up_tokenization_spaces is not None: lowercase : Dict = clean_up_tokenization_spaces if stop_sequence is not None: lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) lowercase : List[str] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" return True def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict: """simple docstring""" lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _A ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) lowercase : List[Any] = ([prefix + arg for arg in args[0]],) lowercase : Dict = True elif isinstance(args[0] , _A ): lowercase : Optional[int] = (prefix + args[0],) lowercase : Union[str, Any] = False else: raise ValueError( f""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = super().__call__(*_A , **_A ) if ( isinstance(args[0] , _A ) and all(isinstance(_A , _A ) for el in args[0] ) and all(len(_A ) == 1 for res in result ) ): return [res[0] for res in result] return result def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A ) return inputs def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any: """simple docstring""" if self.framework == "pt": lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape elif self.framework == "tf": lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy() lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length ) lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) lowercase : int = self.model.generate(**_A , **_A ) lowercase : int = output_ids.shape[0] if self.framework == "pt": lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple: """simple docstring""" lowercase : Any = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: lowercase : Dict = { f"""{self.return_name}_text""": self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) } records.append(_A ) return records @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''summary''' def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]: """simple docstring""" return super().__call__(*_A , **_A ) def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool: """simple docstring""" if max_length < min_length: logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f"""consider decreasing max_length manually, e.g. 
summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''translation''' def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict: """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ): return self.tokenizer._build_translation_inputs( *_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A ) else: return super()._parse_and_tokenize(*_A , truncation=_A ) def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]: """simple docstring""" lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A ) if src_lang is not None: lowercase : Optional[Any] = src_lang if tgt_lang is not None: lowercase : Dict = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. lowercase : Dict = kwargs.get('''task''' , self.task ) lowercase : List[str] = task.split('''_''' ) if task and len(_A ) == 4: # translation, XX, to YY lowercase : Any = items[1] lowercase : List[str] = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]: """simple docstring""" return super().__call__(*_A , **_A )
from math import ceil def snake_case( __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' lowercase : List[str] = list(range(0 , __magic_name__ ) ) lowercase : Any = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowercase : int = [] for i in device_map_blocks: if device_map_blocks.count(__magic_name__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(__magic_name__ ) # Missing blocks lowercase : Optional[int] = [i for i in blocks if i not in device_map_blocks] lowercase : List[Any] = [i for i in device_map_blocks if i not in blocks] if len(__magic_name__ ) != 0: raise ValueError( '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.''' ''' These attention blocks were specified more than once: ''' + str(__magic_name__ ) ) if len(__magic_name__ ) != 0: raise ValueError( '''There are attention blocks for this model that are not specified in the device_map. Add these attention ''' '''blocks to a device on the device_map: ''' + str(__magic_name__ ) ) if len(__magic_name__ ) != 0: raise ValueError( '''The device_map contains more attention blocks than this model has. Remove these from the device_map:''' + str(__magic_name__ ) ) def snake_case( __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' lowercase : str = list(range(__magic_name__ ) ) lowercase : int = int(ceil(n_layers / len(__magic_name__ ) ) ) lowercase : Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , __magic_name__ , __magic_name__ )] return dict(zip(__magic_name__ , __magic_name__ ) )
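# Illustrative sketch: the second helper above (``get_device_map`` in
# transformers; the name is inferred, so treat it as an assumption) chunks
# layer indices evenly across devices, ceil(n_layers / n_devices) per block.
# A self-contained equivalent:
from math import ceil

def example_device_map(n_layers, devices):
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))

assert example_device_map(12, [0, 1, 2, 3]) == {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}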
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase_ = get_logger(__name__) class _A : _UpperCamelCase : int = '''dummy_data''' _UpperCamelCase : Tuple = '''datasets''' _UpperCamelCase : Optional[int] = False def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict: """simple docstring""" lowercase : Tuple = 0 lowercase : List[Any] = dataset_name lowercase : int = cache_dir lowercase : str = use_local_dummy_data lowercase : Union[str, Any] = config # download_callbacks take a single url as input lowercase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root lowercase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general lowercase : Union[str, Any] = str(_A ) # to be downloaded lowercase : Tuple = None lowercase : Optional[int] = None @property def __a ( self : str ) -> Dict: """simple docstring""" if self._dummy_file is None: lowercase : Optional[Any] = self.download_dummy_data() return self._dummy_file @property def __a ( self : int ) -> Optional[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name ) @property def __a ( self : List[Any] ) -> int: """simple docstring""" return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' ) def __a ( self : str ) -> int: """simple docstring""" lowercase : str = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) lowercase : List[str] = cached_path( _A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A ) return os.path.join(_A , self.dummy_file_name ) @property def __a ( self : str ) -> Tuple: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def __a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" if self._bucket_url is None: lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) ) return self._bucket_url @property def __a ( self : Tuple ) -> List[str]: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] ) def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested lowercase : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned lowercase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(_A , _A ): return self.create_dummy_data_dict(_A , _A ) elif isinstance(_A , (list, tuple) ): return self.create_dummy_data_list(_A , _A ) else: return self.create_dummy_data_single(_A , _A ) def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]: """simple docstring""" return path def __a ( self : List[str] ) -> str: """simple docstring""" return {} def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase : Any = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(_A , _A ): for single_url in single_urls: download_callback(_A ) else: lowercase : List[str] = single_urls download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(_A , _A ): lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls] else: lowercase : int = single_urls lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) lowercase : str = value # make sure that values are unique if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique lowercase : str = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple: """simple docstring""" lowercase : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url ) lowercase : str = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): lowercase : List[str] = [data_url[0]] * len(_A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) ) dummy_data_list.append(_A ) return dummy_data_list def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them 
with urllib.parse.quote_plus lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) ) if os.path.exists(_A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def __a ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def __a ( self : Any ) -> Dict: """simple docstring""" pass def __a ( self : int , _A : Optional[Any] ) -> Dict: """simple docstring""" def _iter_archive_members(_A : Optional[int] ): # this preserves the order of the members inside the ZIP archive lowercase : int = Path(self.dummy_file ).parent lowercase : List[str] = path.relative_to(_A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: lowercase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(_A ) lowercase : Tuple = Path(_A ) lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ): yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' ) def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]: """simple docstring""" if not isinstance(_A , _A ): lowercase : Dict = [paths] for path in paths: if os.path.isfile(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): return yield path else: for dirpath, dirnames, filenames in os.walk(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): continue dirnames.sort() for filename in sorted(_A ): if filename.startswith(('''.''', '''__''') ): continue yield os.path.join(_A , _A )
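# Illustrative sketch (standalone): the dummy-data managers above key each
# download by the URL-quoted final path segment of its URL.
import os
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.csv?version=2"  # hypothetical URL
name = urllib.parse.quote_plus(Path(url).name)         # 'train.csv%3Fversion%3D2'
print(os.path.join("dummy", "1.0.0", "dummy_data", name))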
import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowerCAmelCase_ = logging.get_logger(__name__) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ) -> Optional[int]: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowercase : List[str] = os.path.abspath(__magic_name__ ) logger.info(F"""Loading PyTorch weights from {pt_path}""" ) lowercase : str = torch.load(__magic_name__ , map_location='''cpu''' ) logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" ) lowercase : List[Any] = convert_pytorch_state_dict_to_flax(__magic_name__ , __magic_name__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowercase : Dict = convert_pytorch_sharded_state_dict_to_flax(__magic_name__ , __magic_name__ ) return flax_state_dict def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> (Tuple[str], np.ndarray): '''simple docstring''' def is_key_or_prefix_key_in_dict(__magic_name__ ) -> bool: return len(set(__magic_name__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowercase : str = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__magic_name__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowercase : Union[str, Any] = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__magic_name__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowercase : List[Any] = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__magic_name__ ): return renamed_pt_tuple_key, pt_tensor # embedding lowercase : Optional[Any] = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__magic_name__ ): return renamed_pt_tuple_key, pt_tensor # conv layer lowercase : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__magic_name__ ): lowercase : Optional[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__magic_name__ ): lowercase : Optional[int] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase : Union[str, Any] = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase : Union[str, Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowercase : str = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowercase : List[Any] = 
pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowercase : str = pt_tuple_key[-2] + '''_v''' if name is not None: lowercase : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def snake_case( __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} lowercase : Any = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowercase : str = flax_model.params['''params'''] else: lowercase : Optional[int] = flax_model.params lowercase : Tuple = flatten_dict(__magic_name__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowercase : Tuple = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__magic_name__ ) lowercase : List[str] = {} lowercase : Union[str, Any] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowercase : Dict = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Optional[Any] = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowercase : str = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowercase : int = pt_tuple_key[1:] # Correctly rename weight parameters lowercase , lowercase : Union[str, Any] = rename_key_and_reshape_tensor( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # add model prefix if necessary lowercase : Tuple = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowercase : str = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowercase : Union[str, Any] = jnp.asarray(__magic_name__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__magic_name__ , __magic_name__ ) continue # also add unexpected weight so that warning is thrown lowercase : Optional[int] = jnp.asarray(__magic_name__ ) else: # also add unexpected weight so that warning is thrown lowercase : int = jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ ) def snake_case( __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' import torch # Load the index lowercase : Dict = {} for shard_file in shard_filenames: # load using msgpack utils lowercase : List[str] = torch.load(__magic_name__ ) lowercase : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} lowercase : Any = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowercase : str = flax_model.params['''params'''] lowercase : str = flatten_dict(__magic_name__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowercase : int = flax_model.params lowercase : Optional[int] = flatten_dict(__magic_name__ ) lowercase : Optional[int] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowercase : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Tuple = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowercase : Optional[int] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowercase : List[Any] = pt_tuple_key[1:] # Correctly rename weight parameters lowercase , lowercase : Optional[Any] = rename_key_and_reshape_tensor( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # add model prefix if necessary lowercase : Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowercase : List[str] = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowercase : List[str] = jnp.asarray(__magic_name__ ) continue if "var" in flax_key[-1]: lowercase : Optional[int] = jnp.asarray(__magic_name__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__magic_name__ , __magic_name__ ) continue # also add unexpected weight so that warning is thrown lowercase : Optional[Any] = jnp.asarray(__magic_name__ ) else: # also add unexpected weight so that warning is thrown lowercase : Tuple = jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ ) def snake_case( __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : Optional[Any] = os.path.abspath(__magic_name__ ) logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" ) # import correct flax class lowercase : str = getattr(__magic_name__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__magic_name__ , '''rb''' ) as state_f: try: lowercase : Optional[Any] = from_bytes(__magic_name__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ ) -> int: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowercase : Optional[Any] = flatten_dict(jax.tree_util.tree_map(lambda __magic_name__ : x.dtype == jnp.bfloataa , __magic_name__ ) ).values() if any(__magic_name__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowercase : str = jax.tree_util.tree_map( lambda __magic_name__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __magic_name__ ) lowercase : int = flatten_dict(__magic_name__ ) lowercase : Tuple = pt_model.state_dict() lowercase : Dict = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowercase : Optional[int] = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowercase : Tuple = [] lowercase : int = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowercase : int = flax_key_tuple[0] == pt_model.base_model_prefix lowercase : List[str] = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowercase : List[Any] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowercase : str = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__magic_name__ ) not in pt_model_dict: # conv layer lowercase : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) lowercase : int = jnp.transpose(__magic_name__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__magic_name__ ) not in pt_model_dict: # linear layer lowercase : Dict = flax_key_tuple[:-1] + ('''weight''',) lowercase : Union[str, Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowercase : List[str] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowercase : Any = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowercase : Any = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowercase : Any = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowercase : Union[str, Any] = '''.'''.join(__magic_name__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. lowercase : Tuple = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowercase : Dict = key.split('''.''' ) lowercase : List[str] = None if key_components[-3::2] == ["parametrizations", "original0"]: lowercase : Any = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowercase : Optional[int] = key_components[-2] + '''_v''' if name is not None: lowercase : List[Any] = key_components[:-3] + [name] lowercase : str = '''.'''.join(__magic_name__ ) lowercase : Any = key if flax_key in special_pt_names: lowercase : List[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"""Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected """ F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict lowercase : Tuple = np.asarray(__magic_name__ ) if not isinstance(__magic_name__ , np.ndarray ) else flax_tensor lowercase : Optional[Any] = torch.from_numpy(__magic_name__ ) # remove from missing keys missing_keys.remove(__magic_name__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(__magic_name__ ) pt_model.load_state_dict(__magic_name__ ) # re-transform missing_keys to list lowercase : Any = list(__magic_name__ ) if len(__magic_name__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" ) if len(__magic_name__ ) > 0: logger.warning( F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" ''' use it for predictions and inference.''' ) else: logger.warning( F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n""" '''If your task is similar to the task the model of the checkpoint was trained on, ''' F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" ) return pt_model
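# Illustrative sketch of the weight-layout rules applied above: PyTorch conv
# kernels (out, in, kh, kw) become Flax kernels (kh, kw, in, out), and linear
# weights are transposed because Flax stores kernels as (in, out).
import numpy as np

pt_conv = np.zeros((8, 3, 5, 5))           # PyTorch: (out, in, kh, kw)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # Flax:    (kh, kw, in, out)
assert flax_conv.shape == (5, 5, 3, 8)

pt_linear = np.zeros((16, 32))             # PyTorch: (out_features, in_features)
assert pt_linear.T.shape == (32, 16)       # Flax kernel: (in_features, out_features)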
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str: '''simple docstring''' lowercase : Union[str, Any] = [False] * len(__magic_name__ ) lowercase : Optional[int] = [] queue.append(__magic_name__ ) lowercase : int = True while queue: lowercase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__magic_name__ ) lowercase : Dict = True lowercase : List[str] = u return visited[t] def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : List[str] = [-1] * (len(__magic_name__ )) lowercase : Tuple = 0 while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowercase : Any = float('''Inf''' ) lowercase : str = sink while s != source: # Find the minimum value in select path lowercase : Any = min(__magic_name__ , graph[parent[s]][s] ) lowercase : Dict = parent[s] max_flow += path_flow lowercase : Union[str, Any] = sink while v != source: lowercase : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : Optional[int] = parent[v] return max_flow lowerCAmelCase_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] lowerCAmelCase_ , lowerCAmelCase_ = 0, 5 print(ford_fulkerson(graph, source, sink))
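# For the capacity matrix above (the classic textbook max-flow example), the
# BFS-based search should find augmenting paths such as 0->1->3->5 (12),
# 0->2->4->5 (4) and 0->2->4->3->5 (7), so the printed max flow is expected to be 23.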
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class _A ( unittest.TestCase ): _UpperCamelCase : List[str] = StableDiffusionLDMaDPipeline _UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS def __a ( self : List[str] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) lowercase : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowercase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0 ) lowercase : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowercase : Optional[int] = CLIPTextModel(_A ) lowercase : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __a ( self : Optional[Any] , _A : Any , _A : Dict=0 ) -> Union[str, Any]: """simple docstring""" if str(_A ).startswith('''mps''' ): lowercase : Dict = torch.manual_seed(_A ) else: lowercase : Any = torch.Generator(device=_A ).manual_seed(_A ) lowercase : int = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __a ( self : Dict ) -> int: """simple docstring""" lowercase : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase : Optional[int] = self.get_dummy_components() lowercase : int = StableDiffusionLDMaDPipeline(**_A ) lowercase : Tuple = ldmad_pipe.to(_A ) ldmad_pipe.set_progress_bar_config(disable=_A ) lowercase : List[Any] = self.get_dummy_inputs(_A ) lowercase : Union[str, Any] = ldmad_pipe(**_A ) lowercase , lowercase : int = output.rgb, output.depth lowercase : Optional[int] = rgb[0, -3:, -3:, -1] lowercase : List[Any] = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) lowercase : List[Any] = np.array( [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] ) lowercase : List[Any] = np.array([103.46_727, 
85.812_004, 87.849_236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2 def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : List[Any] = self.get_dummy_components() lowercase : Optional[int] = StableDiffusionLDMaDPipeline(**_A ) lowercase : int = ldmad_pipe.to(_A ) ldmad_pipe.set_progress_bar_config(disable=_A ) lowercase : int = self.get_dummy_inputs(_A ) lowercase : int = 3 * [inputs['''prompt''']] # forward lowercase : int = ldmad_pipe(**_A ) lowercase , lowercase : List[Any] = output.rgb, output.depth lowercase : int = rgb_slice_a[0, -3:, -3:, -1] lowercase : str = depth_slice_a[0, -3:, -1] lowercase : List[str] = self.get_dummy_inputs(_A ) lowercase : Optional[int] = 3 * [inputs.pop('''prompt''' )] lowercase : List[Any] = ldmad_pipe.tokenizer( _A , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , ) lowercase : Tuple = text_inputs['''input_ids'''].to(_A ) lowercase : Any = ldmad_pipe.text_encoder(_A )[0] lowercase : Optional[int] = prompt_embeds # forward lowercase : Any = ldmad_pipe(**_A ) lowercase , lowercase : Union[str, Any] = output.rgb, output.depth lowercase : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1] lowercase : Any = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4 def __a ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase : int = self.get_dummy_components() lowercase : Optional[int] = PNDMScheduler(skip_prk_steps=_A ) lowercase : List[str] = StableDiffusionLDMaDPipeline(**_A ) lowercase : Optional[int] = ldmad_pipe.to(_A ) ldmad_pipe.set_progress_bar_config(disable=_A ) lowercase : int = self.get_dummy_inputs(_A ) lowercase : Dict = '''french fries''' lowercase : Optional[Any] = ldmad_pipe(**_A , negative_prompt=_A ) lowercase , lowercase : Dict = output.rgb, output.depth lowercase : Union[str, Any] = rgb[0, -3:, -3:, -1] lowercase : str = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) lowercase : Tuple = np.array( [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] ) lowercase : List[Any] = np.array([107.84_738, 84.62_802, 89.962_135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2 @slow @require_torch_gpu class _A ( unittest.TestCase ): def __a ( self : Any ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Union[str, Any] , _A : str , _A : Tuple="cpu" , _A : int=torch.floataa , _A : List[str]=0 ) -> int: """simple docstring""" lowercase : Dict = torch.Generator(device=_A ).manual_seed(_A ) lowercase : Tuple = np.random.RandomState(_A ).standard_normal((1, 4, 64, 64) ) lowercase : List[str] = torch.from_numpy(_A ).to(device=_A , dtype=_A ) lowercase : Optional[Any] = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" 
lowercase : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ) lowercase : Any = ldmad_pipe.to(_A ) ldmad_pipe.set_progress_bar_config(disable=_A ) lowercase : Union[str, Any] = self.get_inputs(_A ) lowercase : int = ldmad_pipe(**_A ) lowercase , lowercase : Dict = output.rgb, output.depth lowercase : str = rgb[0, -3:, -3:, -1].flatten() lowercase : List[str] = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) lowercase : Dict = np.array( [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] ) lowercase : Union[str, Any] = np.array( [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3 @nightly @require_torch_gpu class _A ( unittest.TestCase ): def __a ( self : Tuple ) -> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : List[Any] , _A : Dict , _A : Tuple="cpu" , _A : List[Any]=torch.floataa , _A : Optional[int]=0 ) -> Any: """simple docstring""" lowercase : List[Any] = torch.Generator(device=_A ).manual_seed(_A ) lowercase : str = np.random.RandomState(_A ).standard_normal((1, 4, 64, 64) ) lowercase : List[str] = torch.from_numpy(_A ).to(device=_A , dtype=_A ) lowercase : Any = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 50, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __a ( self : Dict ) -> List[str]: """simple docstring""" lowercase : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(_A ) ldmad_pipe.set_progress_bar_config(disable=_A ) lowercase : Dict = self.get_inputs(_A ) lowercase : List[Any] = ldmad_pipe(**_A ) lowercase , lowercase : str = output.rgb, output.depth lowercase : Optional[Any] = 0.495_586 lowercase : List[str] = 0.33_795_515 lowercase : str = 112.48_518 lowercase : Optional[Any] = 98.489_746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3 def __a ( self : str ) -> str: """simple docstring""" lowercase : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(_A ) ldmad_pipe.set_progress_bar_config(disable=_A ) lowercase : List[Any] = self.get_inputs(_A ) lowercase : str = ldmad_pipe(**_A ) lowercase , lowercase : int = output.rgb, output.depth lowercase : List[str] = 0.4_194_127 lowercase : str = 0.35_375_586 lowercase : List[Any] = 0.5_638_502 lowercase : Tuple = 0.34_686_103 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3
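# Illustrative usage sketch for the pipeline exercised above (assumes a
# diffusers install with LDM3D support; the checkpoint id matches the tests):
import torch
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
rgb, depth = output.rgb, output.depth  # RGB frames plus aligned depth maps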
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'vocab.txt'} lowerCAmelCase_ = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase_ = { 'openbmb/cpm-ant-10b': 10_24, } def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : Optional[int] = collections.OrderedDict() with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader: lowercase : str = reader.readlines() for index, token in enumerate(__magic_name__ ): lowercase : Union[str, Any] = token.rstrip('''\n''' ) lowercase : List[Any] = index return vocab class _A ( _lowerCamelCase ): def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = vocab lowercase : List[str] = unk_token lowercase : Any = max_input_chars_per_word def __a ( self : List[str] , _A : Tuple ) -> str: """simple docstring""" lowercase : Dict = list(_A ) if len(_A ) > self.max_input_chars_per_word: return [self.unk_token] lowercase : int = 0 lowercase : Dict = [] while start < len(_A ): lowercase : Optional[Any] = len(_A ) lowercase : List[str] = None while start < end: lowercase : List[Any] = ''''''.join(chars[start:end] ) if substr in self.vocab: lowercase : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_A ) lowercase : Dict = end return sub_tokens class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = VOCAB_FILES_NAMES _UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : int = False def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple: """simple docstring""" requires_backends(self , ['''jieba'''] ) super().__init__( bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , ) lowercase : str = bod_token lowercase : str = eod_token lowercase : Any = load_vocab(_A ) lowercase : List[Any] = self.encoder[space_token] lowercase : Tuple = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) lowercase : int = {v: k for k, v in self.encoder.items()} lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def __a ( self : Dict ) -> Optional[int]: """simple docstring""" return self.encoder[self.bod_token] @property def __a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return self.encoder[self.eod_token] @property def __a ( self : List[str] ) -> List[str]: """simple docstring""" return self.encoder["\n"] @property def __a ( self : List[Any] ) -> int: """simple docstring""" return len(self.encoder ) def __a ( self : Union[str, Any] ) -> 
Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : str , _A : List[str] ) -> Tuple: """simple docstring""" lowercase : int = [] for x in jieba.cut(_A , cut_all=_A ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) ) return output_tokens def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any: """simple docstring""" lowercase : List[str] = [i for i in token_ids if i >= 0] lowercase : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_A , **_A ) def __a ( self : List[Any] , _A : int ) -> Optional[Any]: """simple docstring""" return token in self.encoder def __a ( self : Dict , _A : List[str] ) -> str: """simple docstring""" return "".join(_A ) def __a ( self : List[str] , _A : List[str] ) -> Any: """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple: """simple docstring""" return self.decoder.get(_A , self.unk_token ) def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if os.path.isdir(_A ): lowercase : str = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory lowercase : Any = 0 if " " in self.encoder: lowercase : List[Any] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: lowercase : Dict = self.encoder['''\n'''] del self.encoder["\n"] lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) with open(_A , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) lowercase : Any = token_index writer.write(token + '''\n''' ) index += 1 return (vocab_file,) def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) return [1] + ([0] * len(_A ))
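# Illustrative sketch of the WordpieceTokenizer above: greedy
# longest-match-first over the characters of a word, falling back to the unk
# token when no prefix matches (the vocab below is hypothetical).
def greedy_wordpiece(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:  # no substring matched: emit unk and advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens

assert greedy_wordpiece("foobar", {"foob", "ar"}) == ["foob", "ar"]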
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def snake_case( __magic_name__ ) -> Optional[int]: '''simple docstring''' def wrapper(*__magic_name__ , **__magic_name__ ): lowercase : Dict = timeit.default_timer() lowercase : str = func(*__magic_name__ , **__magic_name__ ) lowercase : Optional[Any] = timeit.default_timer() - starttime return delta lowercase : str = func.__name__ return wrapper def snake_case( __magic_name__ , __magic_name__=1_00 , __magic_name__=None ) -> Dict: '''simple docstring''' lowercase : Union[str, Any] = [] lowercase : Optional[Any] = seq_shapes or {} for i in range(__magic_name__ ): lowercase : Dict = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(__magic_name__ , _ArrayXD ): lowercase : Dict = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(__magic_name__ , datasets.Value ): if v.dtype == "string": lowercase : int = '''The small grey turtle was surprisingly fast when challenged.''' else: lowercase : Optional[int] = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(__magic_name__ , datasets.Sequence ): while isinstance(__magic_name__ , datasets.Sequence ): lowercase : Dict = v.feature lowercase : Tuple = seq_shapes[k] lowercase : List[str] = np.random.rand(*__magic_name__ ).astype(v.dtype ) lowercase : Dict = data dummy_data.append((i, example) ) return dummy_data def snake_case( __magic_name__ , __magic_name__ , __magic_name__=1_00 , __magic_name__=None ) -> str: '''simple docstring''' lowercase : int = generate_examples(__magic_name__ , num_examples=__magic_name__ , seq_shapes=__magic_name__ ) with ArrowWriter(features=__magic_name__ , path=__magic_name__ ) as writer: for key, record in dummy_data: lowercase : str = features.encode_example(__magic_name__ ) writer.write(__magic_name__ ) lowercase , lowercase : int = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) lowercase : Optional[Any] = datasets.Dataset.from_file(filename=__magic_name__ , info=datasets.DatasetInfo(features=__magic_name__ ) ) return dataset
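# Illustrative sketch (assumption: these are datasets' benchmark helpers, with
# string values filled by a fixed sentence and numeric values drawn randomly).
# A minimal hand-built dataset with the same kind of features:
import datasets

features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
ds = datasets.Dataset.from_dict(
    {
        "text": ["The small grey turtle was surprisingly fast when challenged."] * 3,
        "score": [4.0, 7.0, 1.0],
    },
    features=features,
)
print(len(ds), ds.features)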
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : int = 1.5 lowercase : int = int(factor * num_class_images ) lowercase : Any = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 ) os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ ) if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: lowercase : str = client.query(text=__magic_name__ ) if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4: break else: lowercase : List[str] = int(factor * num_images ) lowercase : List[str] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , ) lowercase : Dict = 0 lowercase : Optional[Any] = 0 lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ ) with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open( F"""{class_data_dir}/images.txt""" , '''w''' ) as fa: while total < num_class_images: lowercase : int = class_images[count] count += 1 try: lowercase : int = requests.get(images['''url'''] ) if img.status_code == 2_00: lowercase : List[Any] = Image.open(BytesIO(img.content ) ) with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def snake_case( ) -> Optional[int]: '''simple docstring''' lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ ) return parser.parse_args() if __name__ == "__main__": lowerCAmelCase_ = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
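# Example invocation (flag names from the argparse above; the script filename
# is an assumption):
#   python retrieve.py --class_prompt "a photo of a dog" --class_data_dir ./class_data --num_class_images 200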
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCAmelCase_ = abspath(join(dirname(dirname(dirname(__file__))), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def snake_case( __magic_name__ ) -> Union[str, Any]: '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__magic_name__ ) def snake_case( __magic_name__ ) -> List[str]: '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main lowercase : Optional[int] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ )
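# Example invocation (assumption: this mirrors the transformers test suite,
# where the option registered by pytest_addoption_shared is used as):
#   python -m pytest --make-reports=tests tests/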
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def snake_case( ) -> int: '''simple docstring''' lowercase : List[str] = ArgumentParser( description=( '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes''' ) ) # Optional arguments for the launch helper parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' ) # positional parser.add_argument( '''training_script''' , type=__magic_name__ , help=( '''The full path to the single TPU training ''' '''program/script to be launched in parallel, ''' '''followed by all the arguments for the ''' '''training script''' ) , ) # rest from the training program parser.add_argument('''training_script_args''' , nargs=__magic_name__ ) return parser.parse_args() def snake_case( ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = parse_args() # Import training_script as a module. lowercase : Optional[Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowercase : int = script_fpath.stem lowercase : List[Any] = importlib.import_module(__magic_name__ ) # Patch sys.argv lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
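# Example invocation, mirroring the argparse above (script and training-script
# names are illustrative):
#   python xla_spawn.py --num_cores 8 ./run_glue.py --model_name_or_path bert-base-cased --do_train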
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCAmelCase_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'), ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : Optional[int] = state_dict.pop(__magic_name__ ) lowercase : int = val def snake_case( __magic_name__ ) -> List[Any]: '''simple docstring''' lowercase : Any = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowercase : Optional[Any] = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) lowercase : List[str] = value else: lowercase : Optional[Any] = value return new_state_dict def snake_case( __magic_name__ ) -> Dict: '''simple docstring''' lowercase : List[str] = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowercase : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) lowercase : Optional[Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : List[str] = in_proj_weight[:2_56, :] lowercase : Optional[Any] = in_proj_bias[:2_56] lowercase : Optional[int] = in_proj_weight[2_56:5_12, :] lowercase : Any = in_proj_bias[2_56:5_12] lowercase : Optional[int] = in_proj_weight[-2_56:, :] lowercase : Optional[int] = in_proj_bias[-2_56:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention lowercase : List[str] = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) lowercase : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : str = in_proj_weight[:2_56, :] lowercase : str = in_proj_bias[:2_56] lowercase : List[str] = in_proj_weight[2_56:5_12, :] lowercase : List[str] = in_proj_bias[2_56:5_12] lowercase : Union[str, Any] = in_proj_weight[-2_56:, :] lowercase : Any = in_proj_bias[-2_56:] # read in weights + bias of input projection layer of cross-attention lowercase : int = 
state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) lowercase : int = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict lowercase : Dict = in_proj_weight_cross_attn[:2_56, :] lowercase : List[str] = in_proj_bias_cross_attn[:2_56] lowercase : Tuple = in_proj_weight_cross_attn[2_56:5_12, :] lowercase : Union[str, Any] = in_proj_bias_cross_attn[2_56:5_12] lowercase : str = in_proj_weight_cross_attn[-2_56:, :] lowercase : Tuple = in_proj_bias_cross_attn[-2_56:] def snake_case( __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase , lowercase : Dict = image.size lowercase : str = max(__magic_name__ , __magic_name__ ) lowercase : Dict = 8_00 if '''detection''' in checkpoint_url else 10_00 lowercase : Optional[Any] = target_max_size / current_max_size lowercase : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case( __magic_name__ ) -> List[Any]: '''simple docstring''' lowercase : int = F.to_tensor(__magic_name__ ) lowercase : Dict = F.normalize(__magic_name__ , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ) return image @torch.no_grad() def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> int: '''simple docstring''' logger.info('''Converting model...''' ) # load original state dict lowercase : int = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) lowercase : List[str] = rename_backbone_keys(__magic_name__ ) # query, key and value matrices need special treatment read_in_q_k_v(__magic_name__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowercase : Optional[int] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): lowercase : Any = state_dict.pop(__magic_name__ ) lowercase : Optional[int] = val # create HuggingFace model and load state dict lowercase : List[Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: lowercase : Optional[Any] = 15 lowercase : Dict = 2 lowercase : Tuple = {0: '''table''', 1: '''table rotated'''} lowercase : Union[str, Any] = idalabel lowercase : Optional[int] = {v: k for k, v in idalabel.items()} else: lowercase : Dict = 1_25 lowercase : Any = 6 lowercase : Any = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } lowercase : Optional[int] = idalabel lowercase : List[Any] = {v: k for k, v in idalabel.items()} lowercase : List[str] = DetrImageProcessor( format='''coco_detection''' , max_size=8_00 if '''detection''' in checkpoint_url else 10_00 ) lowercase : List[str] = TableTransformerForObjectDetection(__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() # verify our conversion lowercase : Any = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' lowercase : List[str] = 
hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__magic_name__ ) lowercase : Optional[Any] = Image.open(__magic_name__ ).convert('''RGB''' ) lowercase : Union[str, Any] = normalize(resize(__magic_name__ , __magic_name__ ) ).unsqueeze(0 ) lowercase : Union[str, Any] = model(__magic_name__ ) if "detection" in checkpoint_url: lowercase : Optional[Any] = (1, 15, 3) lowercase : Optional[int] = torch.tensor( [[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] ) lowercase : Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] ) else: lowercase : Optional[int] = (1, 1_25, 7) lowercase : List[str] = torch.tensor( [[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] ) lowercase : Union[str, Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) model.save_pretrained(__magic_name__ ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) lowercase : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(__magic_name__ ) image_processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', type=str, choices=[ 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth', ], help='URL of the Table Transformer checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) lowerCAmelCase_ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
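A note on the `read_in_q_k_v` helper above: PyTorch's `nn.MultiheadAttention` stores the query, key and value projections stacked in a single `in_proj_weight` of shape `(3 * hidden_size, hidden_size)`, while the HuggingFace model keeps three separate projections, which is why the conversion slices at multiples of 256 (this checkpoint's hidden size). A minimal sketch of the same split on a dummy tensor (the tensor values are made up; only the shapes follow the script):

import torch

hidden_size = 256  # Table Transformer's transformer hidden size, per the slices above
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # dummy stacked q/k/v projection

q_proj = in_proj_weight[:hidden_size, :]                   # rows 0..255   -> query
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]  # rows 256..511 -> key
v_proj = in_proj_weight[-hidden_size:, :]                  # rows 512..767 -> value
assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)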
308
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def snake_case( __magic_name__ ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__magic_name__ ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class _A ( _lowerCamelCase ): _UpperCamelCase : str = ['''pixel_values'''] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None: """simple docstring""" super().__init__(**_A ) lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224} lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' ) lowercase : List[str] = do_resize lowercase : Optional[Any] = size lowercase : List[str] = do_center_crop lowercase : List[Any] = crop_size lowercase : str = resample lowercase : Tuple = do_rescale lowercase : Any = rescale_factor lowercase : Tuple = do_normalize lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" in size: lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A ) elif "height" in size and "width" in size: lowercase : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: """simple docstring""" if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. lowercase : Union[str, Any] = to_numpy_array(_A ) if do_resize: lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A ) if do_center_crop: lowercase : Optional[int] = self.center_crop(_A , size=_A ) if do_rescale: lowercase : Tuple = self.rescale(image=_A , scale=_A ) if do_normalize: lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A ) lowercase : Any = to_channel_dimension_format(_A , _A ) return image def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image: """simple docstring""" lowercase : str = do_resize if do_resize is not None else self.do_resize lowercase : Optional[Any] = resample if resample is not None else self.resample lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : str = do_rescale if do_rescale is not None else self.do_rescale lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean lowercase : Optional[Any] = image_std if image_std is not None else self.image_std lowercase : str = size if size is not None else self.size lowercase : Any = get_size_dict(_A , default_to_square=_A ) lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size lowercase : str = get_size_dict(_A , param_name='''crop_size''' ) if not valid_images(_A 
): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) lowercase : Union[str, Any] = make_batched(_A ) lowercase : Dict = [ [ self._preprocess_image( image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , ) for img in video ] for video in videos ] lowercase : Tuple = {'''pixel_values''': videos} return BatchFeature(data=_A , tensor_type=_A )
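For reference, the `make_batched` helper above normalizes its input to a list of videos (a list of lists of frames) and raises for anything else. A small sketch of the three accepted shapes, with a NumPy array standing in for a frame (the shape is arbitrary):

import numpy as np

frame = np.zeros((224, 224, 3), dtype=np.uint8)  # stand-in for a single video frame

# the three accepted input shapes and what make_batched returns for each:
#   frame                -> [[frame]]           a lone image becomes a one-frame video
#   [frame, frame]       -> [[frame, frame]]    a list of frames is treated as one video
#   [[frame], [frame]]   -> unchanged           already a batch of videos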
308
1
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
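The return codes of `check_circuit_or_path` encode the classic degree criterion for undirected graphs: every vertex of even degree means an Euler cycle (code 1), exactly two odd-degree vertices mean an Euler path (code 2), anything else means neither (code 3). A quick check of the criterion on the first sample graph, where vertices 1 and 5 are the odd-degree ones:

graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
odd = [u for u, neighbors in graph.items() if len(neighbors) % 2 == 1]
assert odd == [1, 5]  # exactly two odd-degree vertices -> Euler path, but no Euler cycle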
308
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
308
1
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid's algorithm: returns (x, y) with a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x ≡ r1 (mod n1), x ≡ r2 (mod n2) for coprime n1 and n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same solution as above, built from modular inverses instead."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
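A worked example with the argument order `(n1, r1, n2, r2)` used above: the smallest non-negative solution of x ≡ 1 (mod 5) and x ≡ 3 (mod 7) is 31, and both constructions agree on it:

assert chinese_remainder_theorem(5, 1, 7, 3) == 31   # 31 % 5 == 1 and 31 % 7 == 3
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31  # same answer via modular inverses
assert invert_modulo(2, 5) == 3                      # 2 * 3 == 6 ≡ 1 (mod 5)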
308
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Branch on excluding, then including, sequence[index]."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # exclude the element at `index`
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # include the element at `index`
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
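Because the all-exclude branch is explored before any element is appended, the recursion prints the 2**n subsequences starting from the empty one. For `generate_all_subsequences(["A", "B", "C"])` the exact output order is:

# []
# ['C']
# ['B']
# ['B', 'C']
# ['A']
# ['A', 'C']
# ['A', 'B']
# ['A', 'B', 'C']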
308
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = '▁' lowerCAmelCase_ = {'vocab_file': 'spiece.model'} lowerCAmelCase_ = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'} } lowerCAmelCase_ = { 'google/pegasus-xsum': 5_12, } lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES _UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , _A : Union[str, Any] , _A : Optional[int]="<pad>" , _A : Tuple="</s>" , _A : str="<unk>" , _A : List[Any]="<mask_2>" , _A : Optional[int]="<mask_1>" , _A : Dict=None , _A : List[Any]=103 , _A : Optional[Dict[str, Any]] = None , **_A : Optional[int] , ) -> None: """simple docstring""" lowercase : List[Any] = offset if additional_special_tokens is not None: if not isinstance(_A , _A ): raise TypeError( f"""additional_special_tokens should be of type {type(_A )}, but is""" f""" {type(_A )}""" ) lowercase : Optional[int] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(_A ) , self.offset - 1 ) ] if len(set(_A ) ) != len(_A ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) lowercase : str = additional_special_tokens_extended else: lowercase : List[Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] lowercase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_A , unk_token=_A , mask_token=_A , pad_token=_A , mask_token_sent=_A , offset=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) lowercase : Any = mask_token_sent lowercase : Optional[int] = vocab_file lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_A ) # add special tokens to encoder dict lowercase : Dict[int, str] = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) lowercase : Dict[str, int] = {v: k for k, v in self.encoder.items()} @property def __a ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.sp_model ) + self.offset def __a ( self : Optional[int] ) -> Dict[str, int]: """simple docstring""" lowercase : int = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ) -> Dict: """simple docstring""" lowercase : str = self.__dict__.copy() lowercase : str = None return state def __setstate__( self : Optional[Any] , _A : Dict ) -> Optional[int]: """simple docstring""" lowercase : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Any = {} lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __a ( self : List[str] , _A : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_A , out_type=_A ) def __a ( self : int , _A : str ) -> int: """simple docstring""" if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] lowercase : str = self.sp_model.piece_to_id(_A ) return sp_id + self.offset def __a ( self : Any , _A : int ) -> str: """simple docstring""" if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: lowercase : List[str] = self.sp_model.IdToPiece(index - self.offset ) return token def __a ( self : List[str] , _A : Tuple ) -> str: """simple docstring""" lowercase : str = [] lowercase : int = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A ) + token lowercase : Tuple = [] else: current_sub_tokens.append(_A ) out_string += self.sp_model.decode(_A ) return out_string.strip() def __a ( self : int , _A : List[Any]=False ) -> Optional[Any]: """simple docstring""" return 1 def __a ( self : Union[str, Any] , _A : Dict ) -> List[Any]: """simple docstring""" lowercase : Any = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in 
all_special_ids else 0 for x in seq] def __a ( self : Any , _A : List , _A : Optional[List] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(_A ) elif token_ids_a is None: return self._special_token_mask(_A ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def __a ( self : Optional[Any] , _A : Union[str, Any] , _A : Any=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase : Optional[Any] = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , '''wb''' ) as fi: lowercase : List[str] = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
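The central trick in this tokenizer is the `offset`: the lowest ids are reserved for the pad, eos and mask tokens plus the `<unk_x>` pretraining placeholders, so every raw SentencePiece id is shifted up by `offset` on encoding and shifted back down on decoding. A minimal sketch of the two-way shift (the raw id is made up; 103 is the default offset from the signature above):

offset = 103  # default offset; low ids are reserved for the special tokens
sp_id = 7     # hypothetical raw SentencePiece piece id

token_id = sp_id + offset          # what the token-to-id conversion above returns
assert token_id - offset == sp_id  # the id-to-token conversion undoes the shift before IdToPiece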
308
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : Dict = ['''input_features'''] def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int: """simple docstring""" super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , ) lowercase : Optional[Any] = n_fft lowercase : Optional[int] = hop_length lowercase : Optional[int] = chunk_length lowercase : Union[str, Any] = chunk_length * sampling_rate lowercase : Optional[Any] = self.n_samples // hop_length lowercase : Optional[Any] = sampling_rate lowercase : Union[str, Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ) def __a ( self : Dict , _A : np.array ) -> np.ndarray: """simple docstring""" lowercase : List[str] = spectrogram( _A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) lowercase : Union[str, Any] = log_spec[:, :-1] lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 ) lowercase : str = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]: """simple docstring""" if attention_mask is not None: lowercase : Optional[Any] = np.array(_A , np.intaa ) lowercase : List[str] = [] for vector, length in zip(_A , attention_mask.sum(-1 ) ): lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowercase : int = padding_value normed_input_values.append(_A ) else: lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowercase : Optional[Any] = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): lowercase : List[Any] = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase : List[str] = [np.asarray([raw_speech] ).T] lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding lowercase : str = self.pad( _A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowercase : Tuple = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]] if isinstance(input_features[0] , _A ): lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features] else: lowercase : Optional[int] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: lowercase : Any = padded_inputs.convert_to_tensors(_A ) return padded_inputs def __a ( self : Optional[Any] ) -> Dict[str, Any]: """simple docstring""" lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Dict = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
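The `_np_extract_fbank_features` method above follows the usual Whisper-style recipe: power spectrogram, mel filter bank, log10, clamp everything to within 8 dB of the maximum, then rescale to roughly [-1, 1] with (x + 4) / 4. The last two steps in isolation, on invented log-mel values:

import numpy as np

log_spec = np.array([-12.0, -9.0, -2.0, 0.0])          # dummy log10 mel energies
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # clamp: nothing below max - 8
log_spec = (log_spec + 4.0) / 4.0                      # rescale to roughly [-1, 1]
assert log_spec.tolist() == [-1.0, -1.0, 0.5, 1.0]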
308
1
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Cleans a section of the table of content by removing duplicates and sorting entries alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
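Concretely, `clean_doc_toc` merges entries that share a `local` key (refusing if their titles disagree), pins the "Overview" entry first, and sorts everything else by title, case-insensitively. A small before/after sketch with invented entries:

doc_list = [
    {"local": "zebra", "title": "Zebra"},
    {"local": "overview", "title": "Overview"},
    {"local": "ant", "title": "Ant"},
    {"local": "ant", "title": "Ant"},  # duplicate local with an identical title: merged
]
# clean_doc_toc(doc_list) returns:
#   [{"local": "overview", "title": "Overview"},
#    {"local": "ant", "title": "Ant"},
#    {"local": "zebra", "title": "Zebra"}]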
308
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any: """simple docstring""" lowercase : str = parent lowercase : Optional[Any] = batch_size lowercase : Union[str, Any] = seq_length lowercase : str = is_training lowercase : str = use_input_lengths lowercase : List[Any] = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Dict = sinusoidal_embeddings lowercase : Any = causal lowercase : str = asm lowercase : Optional[Any] = n_langs lowercase : Dict = vocab_size lowercase : Dict = n_special lowercase : List[Any] = hidden_size lowercase : str = num_hidden_layers lowercase : int = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : Optional[int] = type_sequence_label_size lowercase : List[str] = initializer_range lowercase : List[str] = num_labels lowercase : int = num_choices lowercase : int = summary_type lowercase : Tuple = use_proj lowercase : Union[str, Any] = scope lowercase : List[str] = bos_token_id def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : str = None if self.use_input_lengths: lowercase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Union[str, Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : Union[str, Any] = None lowercase : List[str] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self : Any ) -> List[Any]: 
"""simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]: """simple docstring""" lowercase : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , lengths=_A , langs=_A ) lowercase : Dict = model(_A , langs=_A ) lowercase : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Any = model(_A , start_positions=_A , end_positions=_A ) lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict: """simple docstring""" lowercase : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) lowercase : Tuple = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) lowercase : Optional[int] = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A ) ((lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int: """simple docstring""" lowercase : List[str] = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict: """simple docstring""" lowercase : Optional[Any] = self.num_labels lowercase : Tuple = XLMForTokenClassification(_A ) model.to(_A ) model.eval() lowercase : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]: """simple docstring""" lowercase : int = self.num_choices lowercase : List[Any] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCamelCase : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCamelCase : Tuple = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the 
slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) lowercase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def __a ( self : Any ) -> List[str]: """simple docstring""" lowercase : List[str] = XLMModelTester(self ) lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_A ) def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_A ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_A ) def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_A ) def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_A ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_A ) def __a ( self : Any ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_A ) def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_A ): # adds PAD dummy token lowercase : List[Any] = min_length + idx + 1 lowercase : str = min_length + idx + 1 lowercase : Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) ) def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_A ): # adds PAD dummy token lowercase 
: Union[str, Any] = min_length + idx + 1 lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , ) pass @slow def __a ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Any = XLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class _A ( unittest.TestCase ): @slow def __a ( self : Any ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(_A ) lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president lowercase : List[str] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase : Dict = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
308
1
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _A ( nn.Module ): _UpperCamelCase : int _UpperCamelCase : int _UpperCamelCase : float = 0.0 _UpperCamelCase : int = 1 _UpperCamelCase : int = 1 _UpperCamelCase : bool = True _UpperCamelCase : bool = False _UpperCamelCase : bool = False _UpperCamelCase : bool = False _UpperCamelCase : jnp.dtype = jnp.floataa def __a ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = [] lowercase : Optional[Any] = [] for i in range(self.num_layers ): lowercase : int = self.in_channels if i == 0 else self.out_channels lowercase : Tuple = FlaxResnetBlockaD( in_channels=_A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_A ) lowercase : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_A ) lowercase : Any = resnets lowercase : Union[str, Any] = attentions if self.add_downsample: lowercase : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _A : str , _A : str , _A : Union[str, Any] , _A : List[str]=True ) -> Any: """simple docstring""" lowercase : Optional[int] = () for resnet, attn in zip(self.resnets , self.attentions ): lowercase : Optional[Any] = resnet(_A , _A , deterministic=_A ) lowercase : Dict = attn(_A , _A , deterministic=_A ) output_states += (hidden_states,) if self.add_downsample: lowercase : Optional[int] = self.downsamplers_a(_A ) output_states += (hidden_states,) return hidden_states, output_states class _A ( nn.Module ): _UpperCamelCase : int _UpperCamelCase : int _UpperCamelCase : float = 0.0 _UpperCamelCase : int = 1 _UpperCamelCase : bool = True _UpperCamelCase : jnp.dtype = jnp.floataa def __a ( self : int ) -> Dict: """simple docstring""" lowercase : Dict = [] for i in range(self.num_layers ): lowercase : int = self.in_channels if i == 0 else self.out_channels lowercase : Dict = FlaxResnetBlockaD( in_channels=_A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_A ) lowercase : List[str] = resnets if self.add_downsample: lowercase : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : Optional[Any]=True ) -> str: """simple docstring""" lowercase : Tuple = () for resnet in self.resnets: lowercase : Tuple = resnet(_A , _A , deterministic=_A ) output_states += (hidden_states,) if self.add_downsample: lowercase : Optional[Any] = self.downsamplers_a(_A ) output_states += (hidden_states,) return hidden_states, output_states class _A ( nn.Module ): _UpperCamelCase : int _UpperCamelCase : int _UpperCamelCase : int _UpperCamelCase : float = 0.0 _UpperCamelCase : int = 1 _UpperCamelCase : int = 1 _UpperCamelCase : bool = True _UpperCamelCase : bool = False _UpperCamelCase : bool = False _UpperCamelCase : bool = False _UpperCamelCase : jnp.dtype = jnp.floataa def __a ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = [] lowercase : int = [] for i in range(self.num_layers ): 
lowercase : Any = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowercase : str = self.prev_output_channel if i == 0 else self.out_channels lowercase : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_A ) lowercase : Tuple = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_A ) lowercase : int = resnets lowercase : Dict = attentions if self.add_upsample: lowercase : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _A : List[str] , _A : Optional[int] , _A : Dict , _A : List[Any] , _A : List[Any]=True ) -> Union[str, Any]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states lowercase : str = res_hidden_states_tuple[-1] lowercase : Optional[Any] = res_hidden_states_tuple[:-1] lowercase : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase : int = resnet(_A , _A , deterministic=_A ) lowercase : Optional[int] = attn(_A , _A , deterministic=_A ) if self.add_upsample: lowercase : Optional[int] = self.upsamplers_a(_A ) return hidden_states class _A ( nn.Module ): _UpperCamelCase : int _UpperCamelCase : int _UpperCamelCase : int _UpperCamelCase : float = 0.0 _UpperCamelCase : int = 1 _UpperCamelCase : bool = True _UpperCamelCase : jnp.dtype = jnp.floataa def __a ( self : str ) -> List[str]: """simple docstring""" lowercase : List[str] = [] for i in range(self.num_layers ): lowercase : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowercase : Any = self.prev_output_channel if i == 0 else self.out_channels lowercase : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_A ) lowercase : List[Any] = resnets if self.add_upsample: lowercase : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Tuple , _A : Any , _A : Any , _A : int , _A : Optional[Any]=True ) -> int: """simple docstring""" for resnet in self.resnets: # pop res hidden states lowercase : List[Any] = res_hidden_states_tuple[-1] lowercase : List[Any] = res_hidden_states_tuple[:-1] lowercase : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase : Any = resnet(_A , _A , deterministic=_A ) if self.add_upsample: lowercase : List[Any] = self.upsamplers_a(_A ) return hidden_states class _A ( nn.Module ): _UpperCamelCase : int _UpperCamelCase : float = 0.0 _UpperCamelCase : int = 1 _UpperCamelCase : int = 1 _UpperCamelCase : bool = False _UpperCamelCase : bool = False _UpperCamelCase : jnp.dtype = jnp.floataa def __a ( self : Tuple ) -> Any: """simple docstring""" lowercase : List[Any] = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] lowercase : Optional[Any] = [] for _ in range(self.num_layers ): lowercase : Tuple = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // 
self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_A ) lowercase : Optional[Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_A ) lowercase : Optional[int] = resnets lowercase : List[Any] = attentions def __call__( self : List[str] , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Union[str, Any]=True ) -> str: """simple docstring""" lowercase : List[str] = self.resnets[0](_A , _A ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): lowercase : Optional[int] = attn(_A , _A , deterministic=_A ) lowercase : Optional[int] = resnet(_A , _A , deterministic=_A ) return hidden_states
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with unit squares
    and tiles of length two, three or four."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
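# A minimal cross-check for the dynamic programme above: for short rows the
# same count can be reproduced by naive recursion. `brute_force_count` is an
# illustrative helper, not part of the original solution.
def brute_force_count(remaining: int) -> int:
    # The first cell is either a unit square or the start of a tile of
    # length two, three or four.
    if remaining < 0:
        return 0
    if remaining == 0:
        return 1
    return brute_force_count(remaining - 1) + sum(
        brute_force_count(remaining - tile) for tile in (2, 3, 4)
    )


assert all(solution(n) == brute_force_count(n) for n in range(12))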
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all contiguous character n-grams of the given size from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
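# A short usage sketch for the n-gram helper above; the expected outputs in
# the comments were checked by hand.
print(create_ngram("I am a sentence", 2))  # ['I ', ' a', 'am', 'm ', ...]
print(create_ngram("abc", 1))              # ['a', 'b', 'c']
print(create_ngram("abc", 5))              # [] -- window longer than the input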
import os


def solution(filename: str = "input.txt") -> int:
    """Find the minimal path sum through the matrix in `filename`, moving only
    right, up or down, starting anywhere in the first column and ending
    anywhere in the last column."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
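# A hand-checkable sketch of the same column-by-column dynamic programme on an
# in-memory matrix (no input file). `minimal_path_sum` is an illustrative
# name, not part of the original solution.
def minimal_path_sum(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [row[0] for row in matrix]  # cost of entering each row in column 0
    for j in range(1, cols):
        # Move right from the previous column ...
        sums = [sums[i] + matrix[i][j] for i in range(rows)]
        # ... then relax downward and upward moves within this column.
        for i in range(1, rows):
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    return min(sums)


# 1 -> 2 -> 3 straight along the top row costs 6, the optimum here.
assert minimal_path_sum([[1, 2, 3], [9, 9, 9], [9, 9, 9]]) == 6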
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCAmelCase_ = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n' class _A ( unittest.TestCase , _lowerCamelCase ): def __a ( self : Tuple ) -> List[Any]: """simple docstring""" lowercase : int = load_tool('''text-question-answering''' ) self.tool.setup() lowercase : Union[str, Any] = load_tool('''text-question-answering''' , remote=_A ) def __a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase : str = self.tool(_A , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' ) def __a ( self : Dict ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = self.remote_tool(_A , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase : Tuple = self.tool(text=_A , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' ) def __a ( self : int ) -> Dict: """simple docstring""" lowercase : Any = self.remote_tool(text=_A , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' )
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ) lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' ) lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids lowercase : List[Any] = model(_A , labels=_A ).loss lowercase : Dict = -tf.math.reduce_mean(_A ).numpy() lowercase : Union[str, Any] = -21.228_168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation link text of the first Google Scholar result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (cells equal to 1 are walkable) from
    `source` to `destination`, returning the distance and the path taken."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
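# A small usage example for the grid Dijkstra above: 1 marks a walkable cell,
# 0 an obstacle, so the path has to detour around the blocked centre.
import numpy as np

grid = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
distance, path = dijkstra(grid, source=(0, 0), destination=(2, 2), allow_diagonal=False)
print(distance)  # 4.0 -- four unit steps around the obstacle
print(path)      # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)] with this tie-breaking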
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def snake_case( __magic_name__ ) -> List[Any]: '''simple docstring''' lowercase : Tuple = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F"""{test_file} instead.""" ) lowercase : Dict = components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) lowercase : str = components[:-1] + [test_fn.replace('''.py''' , '''''' )] lowercase : Dict = '''.'''.join(__magic_name__ ) return test_module_path def snake_case( __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Union[str, Any] = get_module_path(__magic_name__ ) lowercase : Tuple = importlib.import_module(__magic_name__ ) return test_module def snake_case( __magic_name__ ) -> List[str]: '''simple docstring''' lowercase : Union[str, Any] = [] lowercase : Any = get_test_module(__magic_name__ ) for attr in dir(__magic_name__ ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(__magic_name__ , __magic_name__ ) ) # sort with class names return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ ) def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : Union[str, Any] = [] lowercase : List[str] = get_test_module(__magic_name__ ) for attr in dir(__magic_name__ ): lowercase : Optional[int] = getattr(__magic_name__ , __magic_name__ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). lowercase : Any = getattr(__magic_name__ , '''all_model_classes''' , [] ) if len(__magic_name__ ) > 0: test_classes.append(__magic_name__ ) # sort with class names return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ ) def snake_case( __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Union[str, Any] = get_test_classes(__magic_name__ ) lowercase : List[str] = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ ) def snake_case( __magic_name__ ) -> Any: '''simple docstring''' lowercase : Optional[int] = test_class() if hasattr(__magic_name__ , '''setUp''' ): test.setUp() lowercase : Dict = None if hasattr(__magic_name__ , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: lowercase : Optional[Any] = test.model_tester.__class__ return model_tester def snake_case( __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : List[str] = get_test_classes(__magic_name__ ) lowercase : Tuple = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__magic_name__ ) # sort with class names return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ ) def snake_case( __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = get_test_classes_for_model(__magic_name__ , __magic_name__ ) lowercase : Dict = [] for test_class in test_classes: lowercase : Any = get_model_tester_from_test_class(__magic_name__ ) if tester_class is not None: tester_classes.append(__magic_name__ ) # sort with class names return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ ) def snake_case( __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[int] = get_test_classes(__magic_name__ ) lowercase : Any = {test_class: get_model_tester_from_test_class(__magic_name__ ) for test_class in test_classes} return test_tester_mapping def snake_case( __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : Tuple = get_model_classes(__magic_name__ ) lowercase : int = { model_class: get_test_classes_for_model(__magic_name__ , __magic_name__ ) for model_class in model_classes } return model_test_mapping def snake_case( __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = get_model_classes(__magic_name__ ) lowercase : Tuple = { model_class: get_tester_classes_for_model(__magic_name__ , __magic_name__ ) for model_class in model_classes } return model_to_tester_mapping def snake_case( __magic_name__ ) -> str: '''simple docstring''' if isinstance(__magic_name__ , __magic_name__ ): return o elif isinstance(__magic_name__ , __magic_name__ ): return o.__name__ elif isinstance(__magic_name__ , (list, tuple) ): return [to_json(__magic_name__ ) for x in o] elif isinstance(__magic_name__ , __magic_name__ ): return {to_json(__magic_name__ ): to_json(__magic_name__ ) for k, v in o.items()} else: return o
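# A hedged example of the path-to-module translation implemented at the top of
# this utility module, assuming the first helper is named `get_module_path` as
# its call site inside the module-loading helper suggests:
#
#   get_module_path("tests/models/bert/test_modeling_bert.py")
#   -> "tests.models.bert.test_modeling_bert"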
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ = { 'configuration_mask2former': [ 'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Mask2FormerConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['Mask2FormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of n."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of n."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of n via its string representation."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on inputs of growing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
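# A quick consistency sketch: the three digit-sum variants above should agree
# on a spread of values, including zero and negatives.
for n in (0, 7, 10, 999, -123, 262144):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)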
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm lowerCAmelCase_ = logging.get_logger(__name__) @dataclass class _A ( _lowerCamelCase ): _UpperCamelCase : str = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Dict , **_A : List[Any] ) -> Optional[int]: """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase : Tuple = deprecated_arg[3:] setattr(self , _A , not kwargs.pop(_A ) ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) lowercase : Optional[Any] = kwargs.pop('''torchscript''' , self.torchscript ) lowercase : Dict = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) lowercase : Optional[Any] = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**_A ) _UpperCamelCase : bool = field(default=_lowerCamelCase , metadata={'''help''': '''Trace the models using torchscript'''} ) _UpperCamelCase : bool = field(default=_lowerCamelCase , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} ) _UpperCamelCase : str = field( default='''O1''' , metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) } , ) @cached_property def __a ( self : Union[str, Any] ) -> Tuple["torch.device", int]: """simple docstring""" requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: lowercase : Tuple = torch.device('''cpu''' ) lowercase : Tuple = 0 elif is_torch_tpu_available(): lowercase : Optional[int] = xm.xla_device() lowercase : Any = 0 else: lowercase : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) lowercase : Dict = torch.cuda.device_count() return device, n_gpu @property def __a ( self : Any ) -> Optional[Any]: """simple docstring""" return is_torch_tpu_available() and self.tpu @property def __a ( self : int ) -> int: """simple docstring""" requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def __a ( self : Any ) -> "torch.device": """simple docstring""" requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def __a ( self : int ) -> Dict: """simple docstring""" requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def __a ( self : str ) -> List[str]: """simple docstring""" return self.n_gpu > 0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def snake_case( ) -> List[str]: '''simple docstring''' lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ ) lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=__magic_name__ ) env_command_parser(subparsers=__magic_name__ ) launch_command_parser(subparsers=__magic_name__ ) tpu_command_parser(subparsers=__magic_name__ ) test_command_parser(subparsers=__magic_name__ ) # Let's go lowercase : Dict = parser.parse_args() if not hasattr(__magic_name__ , '''func''' ): parser.print_help() exit(1 ) # Run args.func(__magic_name__ ) if __name__ == "__main__": main()
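# Hedged command-line usage for the entry point above, one line per registered
# sub-parser (the script name and its flags are placeholders):
#
#   accelerate config                      # interactive configuration
#   accelerate env                         # environment report
#   accelerate launch train.py --lr 3e-4   # run a script with the saved config
#   accelerate tpu ...                     # TPU-specific commands
#   accelerate test                        # sanity-check the configuration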
from __future__ import annotations import time import numpy as np lowerCAmelCase_ = [8, 5, 9, 7] lowerCAmelCase_ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] lowerCAmelCase_ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _A : def __init__( self : List[str] , _A : list[int] , _A : list[list[int]] , _A : list[list[int]] , ) -> None: """simple docstring""" lowercase : Tuple = claim_vector lowercase : str = allocated_resources_table lowercase : Optional[int] = maximum_claim_table def __a ( self : List[str] ) -> list[int]: """simple docstring""" return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __a ( self : Optional[Any] ) -> list[int]: """simple docstring""" return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __a ( self : Any ) -> list[list[int]]: """simple docstring""" return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_A ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __a ( self : Any ) -> dict[int, list[int]]: """simple docstring""" return {self.__need().index(_A ): i for i in self.__need()} def __a ( self : List[Any] , **_A : int ) -> None: """simple docstring""" lowercase : Optional[int] = self.__need() lowercase : Optional[int] = self.__allocated_resources_table lowercase : List[str] = self.__available_resources() lowercase : Tuple = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: lowercase : int = False for each_need in need_list: lowercase : List[Any] = True for index, need in enumerate(_A ): if need > available_resources[index]: lowercase : Optional[int] = False break if execution: lowercase : Any = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowercase : Tuple = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(_A ) # update available/freed resources stack lowercase : str = np.array(_A ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(_A ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def __a ( self : Tuple ) -> int: """simple docstring""" print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(_A ) + 1}""" + ''' '''.join(f"""{it:>8}""" for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(_A ) + 1}""" + ''' '''.join(f"""{it:>8}""" for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(_A ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(_A ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
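# A compact sketch of the bookkeeping behind the safety check above, using the
# first two processes of the module's own test data: need = maximum claim -
# current allocation, and a process may run when its whole need fits into the
# available vector.
import numpy as np

claim_vector = np.array([8, 5, 9, 7])
allocated = np.array([[2, 0, 1, 1], [0, 1, 2, 1]])
maximum = np.array([[3, 2, 1, 4], [0, 2, 5, 2]])

available = claim_vector - allocated.sum(axis=0)
need = maximum - allocated
runnable = [i for i, row in enumerate(need) if np.all(row <= available)]
print(available)  # [6 4 6 5]
print(runnable)   # [0, 1] -- both processes fit here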
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]: '''simple docstring''' lowercase : List[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowercase : Optional[int] = '''''' else: lowercase : List[Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : Tuple = in_proj_weight[ : config.hidden_size, : ] lowercase : str = in_proj_bias[: config.hidden_size] lowercase : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : Dict = in_proj_bias[ config.hidden_size : 
config.hidden_size * 2 ] lowercase : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase : Optional[int] = in_proj_bias[-config.hidden_size :] def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : str = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : Any = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : List[Any] = dct.pop(__magic_name__ ) lowercase : Union[str, Any] = val def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = ViTMSNConfig() lowercase : str = 10_00 lowercase : List[str] = '''datasets/huggingface/label-files''' lowercase : List[str] = '''imagenet-1k-id2label.json''' lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) ) lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Any = idalabel lowercase : List[Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowercase : int = 3_84 lowercase : Optional[Any] = 15_36 lowercase : Tuple = 6 elif "l16" in checkpoint_url: lowercase : Union[str, Any] = 10_24 lowercase : List[str] = 40_96 lowercase : int = 24 lowercase : Union[str, Any] = 16 lowercase : Tuple = 0.1 elif "b4" in checkpoint_url: lowercase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowercase : Dict = 7 lowercase : List[Any] = 10_24 lowercase : str = 40_96 lowercase : int = 24 lowercase : Dict = 16 lowercase : Tuple = 0.1 lowercase : int = ViTMSNModel(__magic_name__ ) lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder'''] lowercase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(__magic_name__ ) lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ ) for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) lowercase : Dict = ViTImageProcessor( size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ ) lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) lowercase : int = model(**__magic_name__ ) lowercase : Optional[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowercase : List[str] 
= torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__magic_name__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class _A ( _lowerCamelCase ): _UpperCamelCase : Union[str, Any] = '''ibert''' def __init__( self : int , _A : Union[str, Any]=30_522 , _A : List[Any]=768 , _A : Union[str, Any]=12 , _A : int=12 , _A : Optional[int]=3_072 , _A : Tuple="gelu" , _A : List[str]=0.1 , _A : Optional[int]=0.1 , _A : List[str]=512 , _A : List[str]=2 , _A : Union[str, Any]=0.02 , _A : Optional[Any]=1E-12 , _A : str=1 , _A : Dict=0 , _A : List[str]=2 , _A : List[Any]="absolute" , _A : Union[str, Any]=False , _A : List[Any]="none" , **_A : Dict , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) lowercase : Any = vocab_size lowercase : Union[str, Any] = hidden_size lowercase : List[str] = num_hidden_layers lowercase : Optional[Any] = num_attention_heads lowercase : Dict = hidden_act lowercase : List[Any] = intermediate_size lowercase : str = hidden_dropout_prob lowercase : Tuple = attention_probs_dropout_prob lowercase : Optional[int] = max_position_embeddings lowercase : List[str] = type_vocab_size lowercase : int = initializer_range lowercase : Union[str, Any] = layer_norm_eps lowercase : Optional[Any] = position_embedding_type lowercase : List[str] = quant_mode lowercase : Dict = force_dequant class _A ( _lowerCamelCase ): @property def __a ( self : Dict ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": lowercase : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase : List[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax at the given rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
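# A small property check for gnome_sort against the built-in sorted(), over a
# few randomly generated lists.
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert gnome_sort(list(data)) == sorted(data)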
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _A ( _lowerCamelCase ): def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = tokenizer lowercase : List[Any] = tokenizer.bos_token_id lowercase : Union[str, Any] = dataset lowercase : Union[str, Any] = seq_length lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences def __iter__( self : int ) -> int: """simple docstring""" lowercase : Dict = iter(self.dataset ) lowercase : Union[str, Any] = True while more_examples: lowercase , lowercase : Tuple = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(_A )['''content'''] ) buffer_len += len(buffer[-1] ) except StopIteration: lowercase : List[str] = False break lowercase : str = tokenizer(_A , truncation=_A )['''input_ids'''] lowercase : List[str] = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(_A ) , self.seq_length ): lowercase : int = all_token_ids[i : i + self.seq_length] if len(_A ) == self.seq_length: yield torch.tensor(_A ) def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : List[str] = {'''streaming''': True} lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ ) lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length ) lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size ) return eval_dataloader def snake_case( __magic_name__ ) -> str: '''simple docstring''' model.eval() lowercase : str = [] for step, batch in enumerate(__magic_name__ ): with torch.no_grad(): lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ ) lowercase : List[Any] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__magic_name__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) ) try: lowercase : Tuple = torch.exp(__magic_name__ ) except OverflowError: lowercase : List[str] = float('''inf''' ) return loss.item(), perplexity.item() # Setup Accelerator lowerCAmelCase_ = Accelerator() # Parse configuration lowerCAmelCase_ = HfArgumentParser(EvaluationArguments) lowerCAmelCase_ = parser.parse_args() set_seed(args.seed) # Logging lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowerCAmelCase_ = create_dataloader(args) # Prepare everything with our `accelerator`. lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args) logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
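# The evaluation loop above reports perplexity as exp(mean cross-entropy
# loss), falling back to infinity on overflow. A standalone sketch of that
# relationship with illustrative loss values (not from a real model):
import torch

losses = torch.tensor([2.1, 1.9, 2.3])
mean_loss = torch.mean(losses)
try:
    perplexity = torch.exp(mean_loss)
except OverflowError:
    perplexity = torch.tensor(float("inf"))
print(f"loss: {mean_loss:.4f}, perplexity: {perplexity:.4f}")  # ~2.1 and ~8.17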
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : Optional[int] = '''mask2former''' _UpperCamelCase : Optional[Any] = ['''swin'''] _UpperCamelCase : Optional[int] = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Any , _A : Optional[Dict] = None , _A : int = 256 , _A : int = 256 , _A : int = 256 , _A : int = 1_024 , _A : str = "relu" , _A : int = 6 , _A : int = 10 , _A : int = 8 , _A : float = 0.0 , _A : int = 2_048 , _A : bool = False , _A : bool = False , _A : int = 4 , _A : int = 255 , _A : int = 100 , _A : float = 0.1 , _A : float = 2.0 , _A : float = 5.0 , _A : float = 5.0 , _A : int = 12_544 , _A : float = 3.0 , _A : float = 0.75 , _A : float = 0.02 , _A : float = 1.0 , _A : bool = True , _A : List[int] = [4, 8, 16, 32] , _A : bool = None , **_A : str , ) -> List[str]: """simple docstring""" if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' ) lowercase : List[str] = CONFIG_MAPPING['''swin''']( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(_A , _A ): lowercase : Any = backbone_config.pop('''model_type''' ) lowercase : Optional[int] = CONFIG_MAPPING[backbone_model_type] lowercase : List[str] = config_class.from_dict(_A ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {','.join(self.backbones_supported )}""" ) lowercase : Tuple = backbone_config lowercase : List[Any] = feature_size lowercase : Any = mask_feature_size lowercase : List[str] = hidden_dim lowercase : Union[str, Any] = encoder_feedforward_dim lowercase : Dict = activation_function lowercase : str = encoder_layers lowercase : Dict = decoder_layers lowercase : Any = num_attention_heads lowercase : Optional[int] = dropout lowercase : str = dim_feedforward lowercase : Tuple = pre_norm lowercase : Dict = enforce_input_projection lowercase : Optional[int] = common_stride lowercase : Union[str, Any] = ignore_value lowercase : Any = num_queries lowercase : Dict = no_object_weight lowercase : Dict = class_weight lowercase : Any = mask_weight lowercase : List[Any] = dice_weight lowercase : Optional[int] = train_num_points lowercase : Tuple = oversample_ratio lowercase : Any = importance_sample_ratio lowercase : Dict = init_std lowercase : Any = init_xavier_std lowercase : Any = use_auxiliary_loss lowercase : Optional[int] = feature_strides lowercase : str = output_auxiliary_logits lowercase : List[Any] = decoder_layers super().__init__(**_A ) @classmethod def __a ( cls : List[str] , _A : PretrainedConfig , **_A : Tuple ) -> Union[str, Any]: """simple docstring""" return cls( backbone_config=_A , **_A , ) def __a ( self : Union[str, Any] ) -> Dict[str, any]: """simple docstring""" lowercase : Any = copy.deepcopy(self.__dict__ ) lowercase : Tuple = self.backbone_config.to_dict() lowercase : str = self.__class__.model_type return output
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> Optional[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> int: '''simple docstring''' lowercase : List[str] = '''mock-s3-bucket''' lowercase : Optional[int] = F"""s3://{mock_bucket}""" lowercase : List[Any] = extract_path_from_uri(__magic_name__ ) assert dataset_path.startswith('''s3://''' ) is False lowercase : Optional[int] = '''./local/path''' lowercase : Dict = extract_path_from_uri(__magic_name__ ) assert dataset_path == new_dataset_path def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Tuple = is_remote_filesystem(__magic_name__ ) assert is_remote is True lowercase : int = fsspec.filesystem('''file''' ) lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} lowercase : List[Any] = input_paths[compression_fs_class.protocol] if input_path is None: lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__magic_name__ ) lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ ) assert isinstance(__magic_name__ , __magic_name__ ) lowercase : List[Any] = os.path.basename(__magic_name__ ) lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} lowercase : List[str] = compressed_file_paths[protocol] lowercase : str = '''dataset.jsonl''' lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}""" lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ ) assert fs.isfile(__magic_name__ ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ ) lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert 
hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(__magic_name__ ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def snake_case( ) -> List[Any]: '''simple docstring''' lowercase : List[Any] = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ ) with pytest.warns(__magic_name__ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(__magic_name__ ) == 1 assert ( str(warning_info[0].message ) == F"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def snake_case( ) -> int: '''simple docstring''' lowercase : List[str] = ArgumentParser( description=( '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes''' ) ) # Optional arguments for the launch helper parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' ) # positional parser.add_argument( '''training_script''' , type=__magic_name__ , help=( '''The full path to the single TPU training ''' '''program/script to be launched in parallel, ''' '''followed by all the arguments for the ''' '''training script''' ) , ) # rest from the training program parser.add_argument('''training_script_args''' , nargs=__magic_name__ ) return parser.parse_args() def snake_case( ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = parse_args() # Import training_script as a module. lowercase : Optional[Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowercase : int = script_fpath.stem lowercase : List[Any] = importlib.import_module(__magic_name__ ) # Patch sys.argv lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
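# Hedged command-line usage for the TPU launcher above (the training script
# name and its flags are placeholders):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports the training script as a module, rebuilds sys.argv with
# --tpu_num_cores appended, and hands the module's _mp_fn to xmp.spawn.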
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) class _A ( enum.Enum ): _UpperCamelCase : Union[str, Any] = 0 _UpperCamelCase : Any = 1 @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[Any] = '''generated''' def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]: """simple docstring""" super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]: """simple docstring""" lowercase : str = {} if truncation is not None: lowercase : Tuple = truncation lowercase : Tuple = generate_kwargs lowercase : Optional[Any] = {} if return_tensors is not None and return_type is None: lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: lowercase : Dict = return_type if clean_up_tokenization_spaces is not None: lowercase : Dict = clean_up_tokenization_spaces if stop_sequence is not None: lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) lowercase : List[str] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" return True def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict: """simple docstring""" lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _A ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) lowercase : List[Any] = ([prefix + arg for arg in args[0]],) lowercase : Dict = True elif isinstance(args[0] , _A ): lowercase : Optional[int] = (prefix + args[0],) lowercase : Union[str, Any] = False else: raise ValueError( f""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = super().__call__(*_A , **_A ) if ( isinstance(args[0] , _A ) and all(isinstance(_A , _A ) for el in args[0] ) and all(len(_A ) == 1 for res in result ) ): return [res[0] for res in result] return result def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A ) return inputs def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any: """simple docstring""" if self.framework == "pt": lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape elif self.framework == "tf": lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy() lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length ) lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) lowercase : int = self.model.generate(**_A , **_A ) lowercase : int = output_ids.shape[0] if self.framework == "pt": lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple: """simple docstring""" lowercase : Any = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: lowercase : Dict = { f"""{self.return_name}_text""": self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) } records.append(_A ) return records @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''summary''' def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]: """simple docstring""" return super().__call__(*_A , **_A ) def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool: """simple docstring""" if max_length < min_length: logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f"""consider decreasing max_length manually, e.g. 
summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''translation''' def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict: """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ): return self.tokenizer._build_translation_inputs( *_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A ) else: return super()._parse_and_tokenize(*_A , truncation=_A ) def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]: """simple docstring""" lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A ) if src_lang is not None: lowercase : Optional[Any] = src_lang if tgt_lang is not None: lowercase : Dict = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. lowercase : Dict = kwargs.get('''task''' , self.task ) lowercase : List[str] = task.split('''_''' ) if task and len(_A ) == 4: # translation, XX, to YY lowercase : Any = items[1] lowercase : List[str] = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]: """simple docstring""" return super().__call__(*_A , **_A )
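# A hedged usage sketch for the summarization pipeline defined above, through
# the public `transformers.pipeline` factory (downloads a default model at
# run time):
from transformers import pipeline

summarizer = pipeline("summarization")
print(summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20))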
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'MRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MraForMaskedLM', 'MraForMultipleChoice', 'MraForQuestionAnswering', 'MraForSequenceClassification', 'MraForTokenClassification', 'MraLayer', 'MraModel', 'MraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
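# Usage sketch for the lazy-import structure above: `import transformers` stays
# cheap because `modeling_mra` (and therefore torch) is only materialised on
# first attribute access.
#
#     import transformers
#     transformers.MraConfig   # configuration module loads here
#     transformers.MraModel    # modeling module, and torch, load here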
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase_ = get_logger(__name__) class _A : _UpperCamelCase : int = '''dummy_data''' _UpperCamelCase : Tuple = '''datasets''' _UpperCamelCase : Optional[int] = False def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict: """simple docstring""" lowercase : Tuple = 0 lowercase : List[Any] = dataset_name lowercase : int = cache_dir lowercase : str = use_local_dummy_data lowercase : Union[str, Any] = config # download_callbacks take a single url as input lowercase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root lowercase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general lowercase : Union[str, Any] = str(_A ) # to be downloaded lowercase : Tuple = None lowercase : Optional[int] = None @property def __a ( self : str ) -> Dict: """simple docstring""" if self._dummy_file is None: lowercase : Optional[Any] = self.download_dummy_data() return self._dummy_file @property def __a ( self : int ) -> Optional[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name ) @property def __a ( self : List[Any] ) -> int: """simple docstring""" return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' ) def __a ( self : str ) -> int: """simple docstring""" lowercase : str = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) lowercase : List[str] = cached_path( _A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A ) return os.path.join(_A , self.dummy_file_name ) @property def __a ( self : str ) -> Tuple: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def __a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" if self._bucket_url is None: lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) ) return self._bucket_url @property def __a ( self : Tuple ) -> List[str]: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] ) def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested lowercase : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned lowercase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(_A , _A ): return self.create_dummy_data_dict(_A , _A ) elif isinstance(_A , (list, tuple) ): return self.create_dummy_data_list(_A , _A ) else: return self.create_dummy_data_single(_A , _A ) def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]: """simple docstring""" return path def __a ( self : List[str] ) -> str: """simple docstring""" return {} def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase : Any = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(_A , _A ): for single_url in single_urls: download_callback(_A ) else: lowercase : List[str] = single_urls download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(_A , _A ): lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls] else: lowercase : int = single_urls lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) lowercase : str = value # make sure that values are unique if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique lowercase : str = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple: """simple docstring""" lowercase : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url ) lowercase : str = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): lowercase : List[str] = [data_url[0]] * len(_A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) ) dummy_data_list.append(_A ) return dummy_data_list def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them 
with urllib.parse.quote_plus lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) ) if os.path.exists(_A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def __a ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def __a ( self : Any ) -> Dict: """simple docstring""" pass def __a ( self : int , _A : Optional[Any] ) -> Dict: """simple docstring""" def _iter_archive_members(_A : Optional[int] ): # this preserves the order of the members inside the ZIP archive lowercase : int = Path(self.dummy_file ).parent lowercase : List[str] = path.relative_to(_A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: lowercase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(_A ) lowercase : Tuple = Path(_A ) lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ): yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' ) def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]: """simple docstring""" if not isinstance(_A , _A ): lowercase : Dict = [paths] for path in paths: if os.path.isfile(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): return yield path else: for dirpath, dirnames, filenames in os.walk(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): continue dirnames.sort() for filename in sorted(_A ): if filename.startswith(('''.''', '''__''') ): continue yield os.path.join(_A , _A )
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def snake_case( __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' lowercase : int = b.T lowercase : Any = np.sum(np.square(__magic_name__ ) , axis=1 ) lowercase : Tuple = np.sum(np.square(__magic_name__ ) , axis=0 ) lowercase : Dict = np.matmul(__magic_name__ , __magic_name__ ) lowercase : List[Any] = aa[:, None] - 2 * ab + ba[None, :] return d def snake_case( __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = x.reshape(-1 , 3 ) lowercase : str = squared_euclidean_distance(__magic_name__ , __magic_name__ ) return np.argmin(__magic_name__ , axis=1 ) class _A ( _lowerCamelCase ): _UpperCamelCase : Dict = ['''pixel_values'''] def __init__( self : Any , _A : Optional[Union[List[List[int]], np.ndarray]] = None , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : bool = True , **_A : List[str] , ) -> None: """simple docstring""" super().__init__(**_A ) lowercase : str = size if size is not None else {'''height''': 256, '''width''': 256} lowercase : List[Any] = get_size_dict(_A ) lowercase : Any = np.array(_A ) if clusters is not None else None lowercase : Union[str, Any] = do_resize lowercase : Tuple = size lowercase : Optional[int] = resample lowercase : Union[str, Any] = do_normalize lowercase : Tuple = do_color_quantize def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: """simple docstring""" lowercase : Tuple = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dictionary must contain both height and width keys. 
Got {size.keys()}""" ) return resize( _A , size=(size['''height'''], size['''width''']) , resample=_A , data_format=_A , **_A ) def __a ( self : str , _A : np.ndarray , _A : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray: """simple docstring""" lowercase : str = rescale(image=_A , scale=1 / 127.5 , data_format=_A ) lowercase : Any = image - 1 return image def __a ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Optional[bool] = None , _A : Optional[Union[List[List[int]], np.ndarray]] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image: """simple docstring""" lowercase : Tuple = do_resize if do_resize is not None else self.do_resize lowercase : Tuple = size if size is not None else self.size lowercase : Dict = get_size_dict(_A ) lowercase : List[str] = resample if resample is not None else self.resample lowercase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize lowercase : int = do_color_quantize if do_color_quantize is not None else self.do_color_quantize lowercase : str = clusters if clusters is not None else self.clusters lowercase : Any = np.array(_A ) lowercase : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and (size is None or resample is None): raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_color_quantize and clusters is None: raise ValueError('''Clusters must be specified if do_color_quantize is True.''' ) # All transformations expect numpy arrays. lowercase : Any = [to_numpy_array(_A ) for image in images] if do_resize: lowercase : Tuple = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_normalize: lowercase : Any = [self.normalize(image=_A ) for image in images] if do_color_quantize: lowercase : List[str] = [to_channel_dimension_format(_A , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) lowercase : Tuple = np.array(_A ) lowercase : Optional[Any] = color_quantize(_A , _A ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) lowercase : List[Any] = images.shape[0] lowercase : List[Any] = images.reshape(_A , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. lowercase : Optional[Any] = list(_A ) else: lowercase : Dict = [to_channel_dimension_format(_A , _A ) for image in images] lowercase : int = {'''input_ids''': images} return BatchFeature(data=_A , tensor_type=_A )
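# Minimal numpy sketch of the nearest-centroid quantization implemented by
# `squared_euclidean_distance`/`color_quantize` above, on a toy two-colour palette:
if __name__ == "__main__":
    toy_clusters = np.array([[0.0, 0.0, 0.0], [255.0, 255.0, 255.0]])  # black, white
    toy_pixels = np.array([[10.0, 12.0, 9.0], [250.0, 251.0, 249.0]])  # two RGB pixels
    dists = ((toy_pixels[:, None, :] - toy_clusters[None, :, :]) ** 2).sum(axis=-1)
    print(dists.argmin(axis=1))  # [0 1]: the dark pixel maps to black, the bright one to white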
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str: '''simple docstring''' lowercase : Union[str, Any] = [False] * len(__magic_name__ ) lowercase : Optional[int] = [] queue.append(__magic_name__ ) lowercase : int = True while queue: lowercase : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__magic_name__ ) lowercase : Dict = True lowercase : List[str] = u return visited[t] def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : List[str] = [-1] * (len(__magic_name__ )) lowercase : Tuple = 0 while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowercase : Any = float('''Inf''' ) lowercase : str = sink while s != source: # Find the minimum value in select path lowercase : Any = min(__magic_name__ , graph[parent[s]][s] ) lowercase : Dict = parent[s] max_flow += path_flow lowercase : Union[str, Any] = sink while v != source: lowercase : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : Optional[int] = parent[v] return max_flow lowerCAmelCase_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] lowerCAmelCase_ , lowerCAmelCase_ = 0, 5 print(ford_fulkerson(graph, source, sink))
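# Sanity note: the matrix above is the classic CLRS flow network, whose maximum
# flow from node 0 to node 5 is 23, so the print above emits 23. A smaller check
# (assuming the BFS-based routine above is the `ford_fulkerson` the script calls):
# a single 0 -> 1 -> 2 path is limited by its bottleneck capacity of 3.
tiny_graph = [
    [0, 5, 0],
    [0, 0, 3],
    [0, 0, 0],
]
assert ford_fulkerson(tiny_graph, 0, 2) == 3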
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowerCAmelCase_ = { # 1536-bit 5: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AACAA68FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199' + 'FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08' + 
'8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B' + '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9' + 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6' + '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8' + 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C' + '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718' + '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D' + '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D' + 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226' + '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC' + 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26' + '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB' + '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2' + '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127' + 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406' + 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918' + 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151' + '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03' + 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F' + 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B' + 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632' + '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E' + '6DCC4024FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD' + 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831' + '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B' + 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF' + '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6' + 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3' + '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328' + '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C' + 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE' + '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4' + '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300' + '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568' + '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9' + 
'22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B' + '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A' + '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36' + '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1' + 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92' + '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47' + '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71' + '60C980DD98EDD3DFFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, } class _A : def __init__( self : Optional[Any] , _A : int = 14 ) -> None: """simple docstring""" if group not in primes: raise ValueError('''Unsupported Group''' ) lowercase : int = primes[group]['''prime'''] lowercase : Optional[int] = primes[group]['''generator'''] lowercase : str = int(hexlify(urandom(32 ) ) , base=16 ) def __a ( self : Optional[int] ) -> str: """simple docstring""" return hex(self.__private_key )[2:] def __a ( self : str ) -> str: """simple docstring""" lowercase : List[Any] = pow(self.generator , self.__private_key , self.prime ) return hex(_A )[2:] def __a ( self : List[Any] , _A : int ) -> bool: """simple docstring""" return ( 2 <= key <= self.prime - 2 and pow(_A , (self.prime - 1) // 2 , self.prime ) == 1 ) def __a ( self : int , _A : str ) -> str: """simple docstring""" lowercase : List[str] = int(_A , base=16 ) if not self.is_valid_public_key(_A ): raise ValueError('''Invalid public key''' ) lowercase : Tuple = pow(_A , self.__private_key , self.prime ) return shaaaa(str(_A ).encode() ).hexdigest() @staticmethod def __a ( _A : int , _A : int ) -> bool: """simple docstring""" return ( 2 <= remote_public_key_str <= prime - 2 and pow(_A , (prime - 1) // 2 , _A ) == 1 ) @staticmethod def __a ( _A : str , _A : str , _A : int = 14 ) -> str: """simple docstring""" lowercase : Any = int(_A , base=16 ) lowercase : int = int(_A , base=16 ) lowercase : str = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(_A , _A ): raise ValueError('''Invalid public key''' ) lowercase : Tuple = pow(_A , _A , _A ) return shaaaa(str(_A ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
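# Toy walk-through of the exchange implemented above, using a small prime for
# readability (real use sticks to the RFC 3526 groups; all numbers here are
# illustrative only):
if __name__ == "__main__":
    p, g = 23, 5                 # public parameters
    a_priv, b_priv = 6, 15       # each party's private key
    a_pub = pow(g, a_priv, p)    # Alice publishes 8
    b_pub = pow(g, b_priv, p)    # Bob publishes 19
    # both sides derive the same shared value
    assert pow(b_pub, a_priv, p) == pow(a_pub, b_priv, p) == 2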
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'vocab.txt'} lowerCAmelCase_ = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase_ = { 'openbmb/cpm-ant-10b': 10_24, } def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : Optional[int] = collections.OrderedDict() with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader: lowercase : str = reader.readlines() for index, token in enumerate(__magic_name__ ): lowercase : Union[str, Any] = token.rstrip('''\n''' ) lowercase : List[Any] = index return vocab class _A ( _lowerCamelCase ): def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = vocab lowercase : List[str] = unk_token lowercase : Any = max_input_chars_per_word def __a ( self : List[str] , _A : Tuple ) -> str: """simple docstring""" lowercase : Dict = list(_A ) if len(_A ) > self.max_input_chars_per_word: return [self.unk_token] lowercase : int = 0 lowercase : Dict = [] while start < len(_A ): lowercase : Optional[Any] = len(_A ) lowercase : List[str] = None while start < end: lowercase : List[Any] = ''''''.join(chars[start:end] ) if substr in self.vocab: lowercase : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_A ) lowercase : Dict = end return sub_tokens class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = VOCAB_FILES_NAMES _UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : int = False def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple: """simple docstring""" requires_backends(self , ['''jieba'''] ) super().__init__( bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , ) lowercase : str = bod_token lowercase : str = eod_token lowercase : Any = load_vocab(_A ) lowercase : List[Any] = self.encoder[space_token] lowercase : Tuple = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) lowercase : int = {v: k for k, v in self.encoder.items()} lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def __a ( self : Dict ) -> Optional[int]: """simple docstring""" return self.encoder[self.bod_token] @property def __a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return self.encoder[self.eod_token] @property def __a ( self : List[str] ) -> List[str]: """simple docstring""" return self.encoder["\n"] @property def __a ( self : List[Any] ) -> int: """simple docstring""" return len(self.encoder ) def __a ( self : Union[str, Any] ) -> 
Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : str , _A : List[str] ) -> Tuple: """simple docstring""" lowercase : int = [] for x in jieba.cut(_A , cut_all=_A ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) ) return output_tokens def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any: """simple docstring""" lowercase : List[str] = [i for i in token_ids if i >= 0] lowercase : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_A , **_A ) def __a ( self : List[Any] , _A : int ) -> Optional[Any]: """simple docstring""" return token in self.encoder def __a ( self : Dict , _A : List[str] ) -> str: """simple docstring""" return "".join(_A ) def __a ( self : List[str] , _A : List[str] ) -> Any: """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple: """simple docstring""" return self.decoder.get(_A , self.unk_token ) def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if os.path.isdir(_A ): lowercase : str = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory lowercase : Any = 0 if " " in self.encoder: lowercase : List[Any] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: lowercase : Dict = self.encoder['''\n'''] del self.encoder["\n"] lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) with open(_A , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) lowercase : Any = token_index writer.write(token + '''\n''' ) index += 1 return (vocab_file,) def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) return [1] + ([0] * len(_A ))
from __future__ import annotations def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[str, float]: '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
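# Worked example (assuming the un-obfuscated name of the function above is
# shear_stress): with stress left at 0 the routine solves tau = F / A, so a
# 100 N tangential force over 20 m^2 yields 5 Pa:
#
#     shear_stress(stress=0, tangential_force=100, area=20)
#     # -> ('stress', 5.0)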
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : int = 1.5 lowercase : int = int(factor * num_class_images ) lowercase : Any = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 ) os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ ) if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: lowercase : str = client.query(text=__magic_name__ ) if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4: break else: lowercase : List[str] = int(factor * num_images ) lowercase : List[str] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , ) lowercase : Dict = 0 lowercase : Optional[Any] = 0 lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ ) with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open( F"""{class_data_dir}/images.txt""" , '''w''' ) as fa: while total < num_class_images: lowercase : int = class_images[count] count += 1 try: lowercase : int = requests.get(images['''url'''] ) if img.status_code == 2_00: lowercase : List[Any] = Image.open(BytesIO(img.content ) ) with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def snake_case( ) -> Optional[int]: '''simple docstring''' lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ ) return parser.parse_args() if __name__ == "__main__": lowerCAmelCase_ = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
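# Typical invocation of this script (a sketch; the prompt, output directory and
# script filename are illustrative, and `pip install clip-retrieval` is needed):
#
#     python retrieve.py --class_prompt "photo of a dog" \
#         --class_data_dir ./real_reg/dog --num_class_images 200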
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def snake_case( __magic_name__ , __magic_name__ , __magic_name__=8 ) -> Optional[Any]: '''simple docstring''' lowercase : str = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowercase : str = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _A ( _lowerCamelCase ): def __init__( self : Union[str, Any] , _A : UNetaDConditionModel , _A : DDPMScheduler , _A : VQModel , ) -> int: """simple docstring""" super().__init__() self.register_modules( unet=_A , scheduler=_A , movq=_A , ) lowercase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __a ( self : int , _A : Optional[Any] , _A : int , _A : Optional[int] , _A : Dict , _A : int , _A : Optional[Any] ) -> Dict: """simple docstring""" if latents is None: lowercase : Tuple = randn_tensor(_A , generator=_A , device=_A , dtype=_A ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) lowercase : Optional[int] = latents.to(_A ) lowercase : Any = latents * scheduler.init_noise_sigma return latents def __a ( self : Dict , _A : List[Any]=0 ) -> Optional[Any]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowercase : Optional[int] = torch.device(f"""cuda:{gpu_id}""" ) lowercase : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_A , _A ) def __a ( self : int , _A : List[Any]=0 ) -> List[Any]: """simple docstring""" if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) lowercase : List[str] = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=_A ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowercase : int = None for cpu_offloaded_model in [self.unet, self.movq]: lowercase , lowercase : Dict = cpu_offload_with_hook(_A , _A , prev_module_hook=_A ) # We'll offload the last model manually. 
lowercase : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __a ( self : List[Any] ) -> List[str]: """simple docstring""" if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_A , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_A ) def __call__( self : Optional[int] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : torch.FloatTensor , _A : int = 512 , _A : int = 512 , _A : int = 100 , _A : float = 4.0 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , ) -> int: """simple docstring""" lowercase : List[Any] = self._execution_device lowercase : List[Any] = guidance_scale > 1.0 if isinstance(_A , _A ): lowercase : List[str] = torch.cat(_A , dim=0 ) if isinstance(_A , _A ): lowercase : Optional[int] = torch.cat(_A , dim=0 ) if isinstance(_A , _A ): lowercase : Optional[Any] = torch.cat(_A , dim=0 ) lowercase : List[str] = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: lowercase : List[str] = image_embeds.repeat_interleave(_A , dim=0 ) lowercase : Tuple = negative_image_embeds.repeat_interleave(_A , dim=0 ) lowercase : Any = hint.repeat_interleave(_A , dim=0 ) lowercase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A ) lowercase : str = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_A ) self.scheduler.set_timesteps(_A , device=_A ) lowercase : Optional[Any] = self.scheduler.timesteps lowercase : Optional[int] = self.movq.config.latent_channels lowercase , lowercase : Dict = downscale_height_and_width(_A , _A , self.movq_scale_factor ) # create initial latent lowercase : List[Any] = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _A , _A , _A , self.scheduler , ) for i, t in enumerate(self.progress_bar(_A ) ): # expand the latents if we are doing classifier free guidance lowercase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase : Optional[Any] = {'''image_embeds''': image_embeds, '''hint''': hint} lowercase : Tuple = self.unet( sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0] if do_classifier_free_guidance: lowercase , lowercase : Any = noise_pred.split(latents.shape[1] , dim=1 ) lowercase , lowercase : int = noise_pred.chunk(2 ) lowercase , lowercase : Any = variance_pred.chunk(2 ) lowercase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowercase : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowercase , lowercase : Any = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowercase : Tuple = self.scheduler.step( _A , _A , _A , generator=_A , )[0] # post-processing lowercase : Dict = self.movq.decode(_A , force_not_quantize=_A )['''sample'''] if output_type not in ["pt", 
"np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: lowercase : Tuple = image * 0.5 + 0.5 lowercase : int = image.clamp(0 , 1 ) lowercase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowercase : Union[str, Any] = self.numpy_to_pil(_A ) if not return_dict: return (image,) return ImagePipelineOutput(images=_A )
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def snake_case( ) -> int: '''simple docstring''' lowercase : List[str] = ArgumentParser( description=( '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes''' ) ) # Optional arguments for the launch helper parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' ) # positional parser.add_argument( '''training_script''' , type=__magic_name__ , help=( '''The full path to the single TPU training ''' '''program/script to be launched in parallel, ''' '''followed by all the arguments for the ''' '''training script''' ) , ) # rest from the training program parser.add_argument('''training_script_args''' , nargs=__magic_name__ ) return parser.parse_args() def snake_case( ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = parse_args() # Import training_script as a module. lowercase : Optional[Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowercase : int = script_fpath.stem lowercase : List[Any] = importlib.import_module(__magic_name__ ) # Patch sys.argv lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
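# Typical invocation (a sketch; the training script and its flags are
# illustrative):
#
#     python xla_spawn.py --num_cores 8 run_glue.py \
#         --model_name_or_path bert-base-cased --do_train ...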
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): def __init__( self : Optional[int] , **_A : Dict ) -> int: """simple docstring""" super().__init__(**_A ) if self.framework != "pt": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) # No specific FOR_XXX available yet def __call__( self : Dict , _A : Union[np.ndarray, bytes, str] , **_A : Optional[int] ) -> Optional[Any]: """simple docstring""" return super().__call__(_A , **_A ) def __a ( self : str , **_A : Dict ) -> Any: """simple docstring""" lowercase : str = {} if "candidate_labels" in kwargs: lowercase : Optional[Any] = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowercase : Union[str, Any] = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def __a ( self : Optional[int] , _A : str , _A : Union[str, Any]=None , _A : str="This is a sound of {}." ) -> Optional[Any]: """simple docstring""" if isinstance(_A , _A ): if audio.startswith('''http://''' ) or audio.startswith('''https://''' ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png lowercase : int = requests.get(_A ).content else: with open(_A , '''rb''' ) as f: lowercase : List[Any] = f.read() if isinstance(_A , _A ): lowercase : List[str] = ffmpeg_read(_A , self.feature_extractor.sampling_rate ) if not isinstance(_A , np.ndarray ): raise ValueError('''We expect a numpy ndarray as input''' ) if len(audio.shape ) != 1: raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' ) lowercase : Optional[Any] = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' ) lowercase : List[str] = candidate_labels lowercase : Union[str, Any] = [hypothesis_template.format(_A ) for x in candidate_labels] lowercase : Tuple = self.tokenizer(_A , return_tensors=self.framework , padding=_A ) lowercase : Tuple = [text_inputs] return inputs def __a ( self : Optional[int] , _A : Union[str, Any] ) -> Dict: """simple docstring""" lowercase : int = model_inputs.pop('''candidate_labels''' ) lowercase : Optional[Any] = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , _A ): lowercase : Any = text_inputs[0] else: # Batching case. lowercase : Tuple = text_inputs[0][0] lowercase : Dict = self.model(**_A , **_A ) lowercase : Any = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_audio, } return model_outputs def __a ( self : str , _A : Tuple ) -> List[Any]: """simple docstring""" lowercase : Dict = model_outputs.pop('''candidate_labels''' ) lowercase : Any = model_outputs['''logits'''][0] if self.framework == "pt": lowercase : Optional[Any] = logits.softmax(dim=0 ) lowercase : Dict = probs.tolist() else: raise ValueError('''`tf` framework not supported.''' ) lowercase : Dict = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] ) ] return result
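# Usage sketch via the pipeline factory (the CLAP checkpoint follows the
# transformers documentation for this task; the audio path is illustrative):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification",
#                           model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav",
#                candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])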
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def snake_case( __magic_name__ ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__magic_name__ ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class _A ( _lowerCamelCase ): _UpperCamelCase : str = ['''pixel_values'''] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None: """simple docstring""" super().__init__(**_A ) lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224} lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' ) lowercase : List[str] = do_resize lowercase : Optional[Any] = size lowercase : List[str] = do_center_crop lowercase : List[Any] = crop_size lowercase : str = resample lowercase : Tuple = do_rescale lowercase : Any = rescale_factor lowercase : Tuple = do_normalize lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" in size: lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A ) elif "height" in size and "width" in size: lowercase : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: """simple docstring""" if do_resize and (size is None or resample is None): raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. lowercase : Union[str, Any] = to_numpy_array(_A ) if do_resize: lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A ) if do_center_crop: lowercase : Optional[int] = self.center_crop(_A , size=_A ) if do_rescale: lowercase : Tuple = self.rescale(image=_A , scale=_A ) if do_normalize: lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A ) lowercase : Any = to_channel_dimension_format(_A , _A ) return image def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image: """simple docstring""" lowercase : str = do_resize if do_resize is not None else self.do_resize lowercase : Optional[Any] = resample if resample is not None else self.resample lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : str = do_rescale if do_rescale is not None else self.do_rescale lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean lowercase : Optional[Any] = image_std if image_std is not None else self.image_std lowercase : str = size if size is not None else self.size lowercase : Any = get_size_dict(_A , default_to_square=_A ) lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size lowercase : str = get_size_dict(_A , param_name='''crop_size''' ) if not valid_images(_A 
): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) lowercase : Union[str, Any] = make_batched(_A ) lowercase : Dict = [ [ self._preprocess_image( image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , ) for img in video ] for video in videos ] lowercase : Tuple = {'''pixel_values''': videos} return BatchFeature(data=_A , tensor_type=_A )
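# Usage sketch: one toy 8-frame clip through a processor with this interface
# (the concrete class name is an assumption; VideoMAE's processor in
# transformers exposes the same preprocess signature):
#
#     import numpy as np
#     from transformers import VideoMAEImageProcessor
#
#     processor = VideoMAEImageProcessor()
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     out = processor(video, return_tensors="np")
#     out["pixel_values"].shape  # (1, 8, 3, 224, 224)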
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Print every subsequence of ``sequence`` by walking a binary state-space
    tree: at each index the element is either skipped or included."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip the element at ``index``.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include it, recurse, then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
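# Illustration of the state-space tree above (not part of the original file):
# each element is first skipped, then included, so generate_all_subsequences
# on ["A", "B", "C"] prints all 2**3 = 8 subsequences in this order:
#
#     []
#     ['C']
#     ['B']
#     ['B', 'C']
#     ['A']
#     ['A', 'C']
#     ['A', 'B']
#     ['A', 'B', 'C']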
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
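# A minimal round-trip sketch for the Vigenère helpers above (the key and
# plaintext are illustrative examples, not from the original script).
# Non-letters pass through unchanged and the key index only advances on
# letters, so decryption with the same key exactly inverts encryption:
#
#     >>> encrypted = encrypt_message("LEMON", "Attack at dawn!")
#     >>> decrypt_message("LEMON", encrypted) == "Attack at dawn!"
#     True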
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : Dict = ['''input_features'''] def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int: """simple docstring""" super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , ) lowercase : Optional[Any] = n_fft lowercase : Optional[int] = hop_length lowercase : Optional[int] = chunk_length lowercase : Union[str, Any] = chunk_length * sampling_rate lowercase : Optional[Any] = self.n_samples // hop_length lowercase : Optional[Any] = sampling_rate lowercase : Union[str, Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ) def __a ( self : Dict , _A : np.array ) -> np.ndarray: """simple docstring""" lowercase : List[str] = spectrogram( _A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) lowercase : Union[str, Any] = log_spec[:, :-1] lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 ) lowercase : str = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]: """simple docstring""" if attention_mask is not None: lowercase : Optional[Any] = np.array(_A , np.intaa ) lowercase : List[str] = [] for vector, length in zip(_A , attention_mask.sum(-1 ) ): lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowercase : int = padding_value normed_input_values.append(_A ) else: lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowercase : Optional[Any] = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): lowercase : List[Any] = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase : List[str] = [np.asarray([raw_speech] ).T] lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding lowercase : str = self.pad( _A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowercase : Tuple = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]] if isinstance(input_features[0] , _A ): lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features] else: lowercase : Optional[int] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: lowercase : Any = padded_inputs.convert_to_tensors(_A ) return padded_inputs def __a ( self : Optional[Any] ) -> Dict[str, Any]: """simple docstring""" lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Dict = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'vocab.txt'} lowerCAmelCase_ = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase_ = { 'openbmb/cpm-ant-10b': 10_24, } def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : Optional[int] = collections.OrderedDict() with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader: lowercase : str = reader.readlines() for index, token in enumerate(__magic_name__ ): lowercase : Union[str, Any] = token.rstrip('''\n''' ) lowercase : List[Any] = index return vocab class _A ( _lowerCamelCase ): def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = vocab lowercase : List[str] = unk_token lowercase : Any = max_input_chars_per_word def __a ( self : List[str] , _A : Tuple ) -> str: """simple docstring""" lowercase : Dict = list(_A ) if len(_A ) > self.max_input_chars_per_word: return [self.unk_token] lowercase : int = 0 lowercase : Dict = [] while start < len(_A ): lowercase : Optional[Any] = len(_A ) lowercase : List[str] = None while start < end: lowercase : List[Any] = ''''''.join(chars[start:end] ) if substr in self.vocab: lowercase : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_A ) lowercase : Dict = end return sub_tokens class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = VOCAB_FILES_NAMES _UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : int = False def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple: """simple docstring""" requires_backends(self , ['''jieba'''] ) super().__init__( bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , ) lowercase : str = bod_token lowercase : str = eod_token lowercase : Any = load_vocab(_A ) lowercase : List[Any] = self.encoder[space_token] lowercase : Tuple = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) lowercase : int = {v: k for k, v in self.encoder.items()} lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def __a ( self : Dict ) -> Optional[int]: """simple docstring""" return self.encoder[self.bod_token] @property def __a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return self.encoder[self.eod_token] @property def __a ( self : List[str] ) -> List[str]: """simple docstring""" return self.encoder["\n"] @property def __a ( self : List[Any] ) -> int: """simple docstring""" return len(self.encoder ) def __a ( self : Union[str, Any] ) -> 
Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : str , _A : List[str] ) -> Tuple: """simple docstring""" lowercase : int = [] for x in jieba.cut(_A , cut_all=_A ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) ) return output_tokens def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any: """simple docstring""" lowercase : List[str] = [i for i in token_ids if i >= 0] lowercase : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_A , **_A ) def __a ( self : List[Any] , _A : int ) -> Optional[Any]: """simple docstring""" return token in self.encoder def __a ( self : Dict , _A : List[str] ) -> str: """simple docstring""" return "".join(_A ) def __a ( self : List[str] , _A : List[str] ) -> Any: """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple: """simple docstring""" return self.decoder.get(_A , self.unk_token ) def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if os.path.isdir(_A ): lowercase : str = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory lowercase : Any = 0 if " " in self.encoder: lowercase : List[Any] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: lowercase : Dict = self.encoder['''\n'''] del self.encoder["\n"] lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) with open(_A , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) lowercase : Any = token_index writer.write(token + '''\n''' ) index += 1 return (vocab_file,) def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) return [1] + ([0] * len(_A ))
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any: """simple docstring""" lowercase : str = parent lowercase : Optional[Any] = batch_size lowercase : Union[str, Any] = seq_length lowercase : str = is_training lowercase : str = use_input_lengths lowercase : List[Any] = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Dict = sinusoidal_embeddings lowercase : Any = causal lowercase : str = asm lowercase : Optional[Any] = n_langs lowercase : Dict = vocab_size lowercase : Dict = n_special lowercase : List[Any] = hidden_size lowercase : str = num_hidden_layers lowercase : int = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : Optional[int] = type_sequence_label_size lowercase : List[str] = initializer_range lowercase : List[str] = num_labels lowercase : int = num_choices lowercase : int = summary_type lowercase : Tuple = use_proj lowercase : Union[str, Any] = scope lowercase : List[str] = bos_token_id def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : str = None if self.use_input_lengths: lowercase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Union[str, Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : Union[str, Any] = None lowercase : List[str] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self : Any ) -> List[Any]: 
"""simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]: """simple docstring""" lowercase : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , lengths=_A , langs=_A ) lowercase : Dict = model(_A , langs=_A ) lowercase : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Any = model(_A , start_positions=_A , end_positions=_A ) lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict: """simple docstring""" lowercase : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) lowercase : Tuple = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) lowercase : Optional[int] = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A ) ((lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int: """simple docstring""" lowercase : List[str] = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict: """simple docstring""" lowercase : Optional[Any] = self.num_labels lowercase : Tuple = XLMForTokenClassification(_A ) model.to(_A ) model.eval() lowercase : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]: """simple docstring""" lowercase : int = self.num_choices lowercase : List[Any] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCamelCase : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCamelCase : Tuple = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the 
slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) lowercase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def __a ( self : Any ) -> List[str]: """simple docstring""" lowercase : List[str] = XLMModelTester(self ) lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_A ) def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_A ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_A ) def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_A ) def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_A ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_A ) def __a ( self : Any ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_A ) def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_A ): # adds PAD dummy token lowercase : List[Any] = min_length + idx + 1 lowercase : str = min_length + idx + 1 lowercase : Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) ) def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_A ): # adds PAD dummy token lowercase 
: Union[str, Any] = min_length + idx + 1 lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , ) pass @slow def __a ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Any = XLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class _A ( unittest.TestCase ): @slow def __a ( self : Any ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(_A ) lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president lowercase : List[str] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase : Dict = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
def solution(length: int = 50) -> int:
    """Count the tilings of a row of ``length`` unit cells using black unit
    squares plus oblong tiles of length 2, 3, and 4."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
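# Worked instance of the recurrence above: the count splits on the first
# colored tile. A row is either all black (the initial 1), or it has
# ``tile_start`` leading black cells, one tile of ``tile_length``, and then
# any filling of the remaining ``row_length - tile_start - tile_length``
# cells. For small rows this gives
#
#     solution(3) == 4   # all black; a 2-tile at offset 0 or 1; one 3-tile
#     solution(4) == 8
#
# matching the linear recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4).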
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def snake_case( ) -> Dict: '''simple docstring''' lowercase : Any = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=__magic_name__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=__magic_name__ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=__magic_name__ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=__magic_name__ , default=0 , help='''cuda_id.''' , ) lowercase : Any = parser.parse_args() return args def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple: '''simple docstring''' if not len(__magic_name__ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowercase , lowercase : str = imgs[0].size lowercase : Tuple = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowercase , lowercase : int = grid.size for i, img in enumerate(__magic_name__ ): grid.paste(__magic_name__ , box=(i % cols * w, i // cols * h) ) return grid def snake_case( __magic_name__ , __magic_name__="robotic cat with wings" , __magic_name__=7.5 , __magic_name__=50 , __magic_name__=1 , __magic_name__=42 , ) -> Any: '''simple docstring''' lowercase : str = torch.Generator(pipeline.device ).manual_seed(__magic_name__ ) lowercase : Dict = pipeline( __magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=__magic_name__ , generator=__magic_name__ , num_images_per_prompt=__magic_name__ , ).images lowercase : int = int(math.sqrt(__magic_name__ ) ) lowercase : str = image_grid(__magic_name__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images lowerCAmelCase_ = parse_args() # Load models and create wrapper for stable diffusion lowerCAmelCase_ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') lowerCAmelCase_ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') lowerCAmelCase_ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') lowerCAmelCase_ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) lowerCAmelCase_ = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): lowerCAmelCase_ = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: lowerCAmelCase_ = unet.to(torch.device('cuda', args.cuda_id)) lowerCAmelCase_ = pipeline.to(unet.device) lowerCAmelCase_ , lowerCAmelCase_ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, 
'{}.png'.format('_'.join(args.caption.split())))) lowerCAmelCase_ = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum from the left column to the right column
    of the matrix in ``filename``, moving right, up, or down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # Enter the column from the left ...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # ... then relax downward moves ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # ... and upward moves.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
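# Sanity note (an assumption about intent, not stated in the original file):
# this is the classic "three ways" minimal path sum. On the 5x5 example grid
# from Project Euler problem 82 the minimal left-to-right sum is 994, via
# 201 -> 96 -> 342 -> 234 -> 103 -> 18.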
import logging import os import threading import time try: import warnings except ImportError: lowerCAmelCase_ = None try: import msvcrt except ImportError: lowerCAmelCase_ = None try: import fcntl except ImportError: lowerCAmelCase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowerCAmelCase_ = OSError # Data # ------------------------------------------------ lowerCAmelCase_ = [ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] lowerCAmelCase_ = '3.0.12' lowerCAmelCase_ = None def snake_case( ) -> int: '''simple docstring''' global _logger lowercase : Optional[int] = _logger or logging.getLogger(__name__ ) return _logger class _A ( _lowerCamelCase ): def __init__( self : str , _A : Dict ) -> Optional[int]: """simple docstring""" lowercase : int = lock_file return None def __str__( self : Optional[Any] ) -> Tuple: """simple docstring""" lowercase : str = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _A : def __init__( self : Union[str, Any] , _A : int ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = lock return None def __enter__( self : Optional[Any] ) -> str: """simple docstring""" return self.lock def __exit__( self : Union[str, Any] , _A : Any , _A : Optional[Any] , _A : int ) -> Any: """simple docstring""" self.lock.release() return None class _A : def __init__( self : Union[str, Any] , _A : Optional[int] , _A : Optional[int]=-1 , _A : int=None ) -> Optional[int]: """simple docstring""" lowercase : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long lowercase : Dict = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. lowercase : Union[str, Any] = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. lowercase : str = None # The default timeout value. lowercase : Optional[int] = timeout # We use this lock primarily for the lock counter. lowercase : List[str] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. lowercase : Union[str, Any] = 0 return None @property def __a ( self : Optional[Any] ) -> Dict: """simple docstring""" return self._lock_file @property def __a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return self._timeout @timeout.setter def __a ( self : str , _A : Optional[Any] ) -> Optional[int]: """simple docstring""" lowercase : List[str] = float(_A ) return None def __a ( self : List[str] ) -> Dict: """simple docstring""" raise NotImplementedError() def __a ( self : Optional[Any] ) -> Tuple: """simple docstring""" raise NotImplementedError() @property def __a ( self : int ) -> Optional[int]: """simple docstring""" return self._lock_file_fd is not None def __a ( self : List[Any] , _A : str=None , _A : int=0.05 ) -> List[str]: """simple docstring""" if timeout is None: lowercase : Union[str, Any] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 lowercase : Dict = id(self ) lowercase : Dict = self._lock_file lowercase : int = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: lowercase : Tuple = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __a ( self : Optional[Any] , _A : str=False ) -> Union[str, Any]: """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: lowercase : List[str] = id(self ) lowercase : Union[str, Any] = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() lowercase : Union[str, Any] = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self : str ) -> Tuple: """simple docstring""" self.acquire() return self def __exit__( self : Any , _A : str , _A : Any , _A : str ) -> int: """simple docstring""" self.release() return None def __del__( self : int ) -> int: """simple docstring""" self.release(force=_A ) return None def __a ( self : int , _A : str , _A : int ) -> str: """simple docstring""" lowercase : Union[str, Any] = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: lowercase : str = os.path.dirname(_A ) lowercase : List[Any] = str(hash(_A ) ) lowercase : str = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class _A ( _lowerCamelCase ): def __init__( self : Dict , _A : List[Any] , _A : int=-1 , _A : Optional[Any]=None ) -> Any: """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) lowercase : Optional[Any] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def __a ( self : Any ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: lowercase : Union[str, Any] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: lowercase : str = fd return None def __a ( self : Tuple ) -> Tuple: """simple docstring""" lowercase : Optional[int] = self._lock_file_fd lowercase : Optional[int] = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _A ( _lowerCamelCase ): def __init__( self : Tuple , _A : Dict , _A : List[Any]=-1 , _A : Optional[int]=None ) -> str: """simple docstring""" lowercase : List[Any] = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def __a ( self : List[Any] ) -> str: """simple docstring""" lowercase : Dict = os.O_RDWR | os.O_CREAT | os.O_TRUNC lowercase : Dict = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: lowercase : int = fd return None def __a ( self : Optional[int] ) -> List[str]: """simple docstring""" lowercase : List[str] = self._lock_file_fd lowercase : Dict = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class _A ( _lowerCamelCase ): def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: lowercase : int = os.open(self._lock_file , _A ) except OSError: pass else: lowercase : Any = fd return None def __a ( self : Optional[int] ) -> Any: """simple docstring""" os.close(self._lock_file_fd ) lowercase : List[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowerCAmelCase_ = None if msvcrt: lowerCAmelCase_ = WindowsFileLock elif fcntl: lowerCAmelCase_ = UnixFileLock else: lowerCAmelCase_ = SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
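# Note on the lazy-import pattern above: `_import_structure` maps submodule
# names to the public symbols they export, and `_LazyModule` replaces this
# module in `sys.modules`. The idea is that an attribute access such as
# `FocalNetModel` only triggers the real import of `modeling_focalnet` (and
# hence torch) on first use, keeping the top-level package import cheap.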
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
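# Minimal usage sketch (grid values are illustrative): cells equal to 1 are
# walkable, per the `next_node == 1` check above, and every step costs 1.
#
#     >>> grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#     >>> dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
#     >>> float(dist)
#     6.0
#     >>> path[0], path[-1]
#     ((0, 0), (2, 0))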
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>
    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>
    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations import math from collections.abc import Callable def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 1_00 , ) -> float: '''simple docstring''' lowercase : Any = x_start lowercase : Tuple = fnc(__magic_name__ ) lowercase : List[str] = 0.0 for _ in range(__magic_name__ ): # Approximates curve as a sequence of linear lines and sums their length lowercase : int = (x_end - x_start) / steps + xa lowercase : Tuple = fnc(__magic_name__ ) length += math.hypot(xb - xa , fxb - fxa ) # Increment step lowercase : List[Any] = xb lowercase : Optional[int] = fxb return length if __name__ == "__main__": def snake_case( __magic_name__ ) -> Union[str, Any]: '''simple docstring''' return math.sin(10 * x ) print('f(x) = sin(10 * x)') print('The length of the curve from x = -10 to x = 10 is:') lowerCAmelCase_ = 10 while i <= 10_00_00: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
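# Worked check of the algorithm above, as a self-contained, deobfuscated
# sketch: for f(x) = x on [0, 1] every linear segment has length
# hypot(h, h) = h * sqrt(2), so the approximation equals sqrt(2) ~= 1.41421
# for any number of steps.
import math

def _line_length(fnc, x_start, x_end, steps=100):
    x1, fx1, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        x2 = x1 + (x_end - x_start) / steps  # advance one step
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)  # length of the linear segment
        x1, fx1 = x2, fx2
    return length

assert abs(_line_length(lambda x: x, 0.0, 1.0, 100) - math.sqrt(2)) < 1e-9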
def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : List[Any] = abs(__magic_name__ ) lowercase : Optional[Any] = 0 while n > 0: res += n % 10 n //= 10 return res def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : Optional[int] = abs(__magic_name__ ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def snake_case( __magic_name__ ) -> int: '''simple docstring''' return sum(int(__magic_name__ ) for c in str(abs(__magic_name__ ) ) ) def snake_case( ) -> None: '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(__magic_name__ , __magic_name__ ) -> None: lowercase : str = F"""{func.__name__}({value})""" lowercase : Any = timeit(F"""__main__.{call}""" , setup='''import __main__''' ) print(F"""{call:56} = {func(__magic_name__ )} -- {timing:.4f} seconds""" ) for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(__magic_name__ , __magic_name__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
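# Quick self-contained sanity check: all three variants above compute the same
# digit sum, e.g. 262144 -> 2+6+2+1+4+4 = 19, with the sign discarded via abs().
assert sum(int(c) for c in str(abs(262144))) == 19
assert sum(int(c) for c in str(abs(-262144))) == 19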
def snake_case( __magic_name__ ) -> str: '''simple docstring''' return " ".join( ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
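# Expected behaviour of the function above: only words longer than four
# characters are reversed, so the demo should print 'Hey fellow warriors'.
assert " ".join(
    w[::-1] if len(w) > 4 else w for w in "Hey wollef sroirraw".split()
) == "Hey fellow warriors"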
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def snake_case( ) -> List[str]: '''simple docstring''' lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ ) lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=__magic_name__ ) env_command_parser(subparsers=__magic_name__ ) launch_command_parser(subparsers=__magic_name__ ) tpu_command_parser(subparsers=__magic_name__ ) test_command_parser(subparsers=__magic_name__ ) # Let's go lowercase : Dict = parser.parse_args() if not hasattr(__magic_name__ , '''func''' ): parser.print_help() exit(1 ) # Run args.func(__magic_name__ ) if __name__ == "__main__": main()
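# Typical shell invocations of the CLI defined above; each subcommand is wired
# in by one of the *_command_parser registrations (arguments illustrative):
#   accelerate config                     # interactive configuration
#   accelerate env                        # report the current environment
#   accelerate launch train.py --arg ...  # launch a (distributed) script
#   accelerate test                       # sanity-check the saved config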
def snake_case( __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' assert x is not None assert y is not None lowercase : Any = len(__magic_name__ ) lowercase : int = len(__magic_name__ ) # declaring the array for storing the dp values lowercase : int = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1 , m + 1 ): for j in range(1 , n + 1 ): lowercase : int = 1 if x[i - 1] == y[j - 1] else 0 lowercase : Optional[Any] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match ) lowercase : Dict = '''''' lowercase , lowercase : str = m, n while i > 0 and j > 0: lowercase : Optional[int] = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: lowercase : Union[str, Any] = x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": lowerCAmelCase_ = 'AGGTAB' lowerCAmelCase_ = 'GXTXAYB' lowerCAmelCase_ = 4 lowerCAmelCase_ = 'GTAB' lowerCAmelCase_ , lowerCAmelCase_ = longest_common_subsequence(a, b) print('len =', ln, ', sub-sequence =', subseq) import doctest doctest.testmod()
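# Self-contained check of the DP recurrence used above (length only): the LCS
# of 'programming' and 'gaming' is 'gaming', length 6.
def _lcs_len(x: str, y: str) -> int:
    m, n = len(x), len(y)
    table = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            table[i][j] = max(table[i - 1][j], table[i][j - 1], table[i - 1][j - 1] + match)
    return table[m][n]

assert _lcs_len("programming", "gaming") == 6
assert _lcs_len("AGGTAB", "GXTXAYB") == 4  # matches the demo above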
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]: '''simple docstring''' lowercase : List[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowercase : Optional[int] = '''''' else: lowercase : List[Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : Tuple = in_proj_weight[ : config.hidden_size, : ] lowercase : str = in_proj_bias[: config.hidden_size] lowercase : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : Dict = in_proj_bias[ config.hidden_size : 
config.hidden_size * 2 ] lowercase : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase : Optional[int] = in_proj_bias[-config.hidden_size :] def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : str = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : Any = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : List[Any] = dct.pop(__magic_name__ ) lowercase : Union[str, Any] = val def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = ViTMSNConfig() lowercase : str = 10_00 lowercase : List[str] = '''datasets/huggingface/label-files''' lowercase : List[str] = '''imagenet-1k-id2label.json''' lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) ) lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Any = idalabel lowercase : List[Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowercase : int = 3_84 lowercase : Optional[Any] = 15_36 lowercase : Tuple = 6 elif "l16" in checkpoint_url: lowercase : Union[str, Any] = 10_24 lowercase : List[str] = 40_96 lowercase : int = 24 lowercase : Union[str, Any] = 16 lowercase : Tuple = 0.1 elif "b4" in checkpoint_url: lowercase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowercase : Dict = 7 lowercase : List[Any] = 10_24 lowercase : str = 40_96 lowercase : int = 24 lowercase : Dict = 16 lowercase : Tuple = 0.1 lowercase : int = ViTMSNModel(__magic_name__ ) lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder'''] lowercase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(__magic_name__ ) lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ ) for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) lowercase : Dict = ViTImageProcessor( size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ ) lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) lowercase : int = model(**__magic_name__ ) lowercase : Optional[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowercase : List[str] 
= torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__magic_name__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
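# Example shell invocation of the conversion script above. The checkpoint URL
# is the script's own default; the script file name and dump folder are
# assumptions, not prescribed by this file:
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small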
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : Dict = ['''input_features'''] def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int: """simple docstring""" super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , ) lowercase : Optional[Any] = n_fft lowercase : Optional[int] = hop_length lowercase : Optional[int] = chunk_length lowercase : Union[str, Any] = chunk_length * sampling_rate lowercase : Optional[Any] = self.n_samples // hop_length lowercase : Optional[Any] = sampling_rate lowercase : Union[str, Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ) def __a ( self : Dict , _A : np.array ) -> np.ndarray: """simple docstring""" lowercase : List[str] = spectrogram( _A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) lowercase : Union[str, Any] = log_spec[:, :-1] lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 ) lowercase : str = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]: """simple docstring""" if attention_mask is not None: lowercase : Optional[Any] = np.array(_A , np.intaa ) lowercase : List[str] = [] for vector, length in zip(_A , attention_mask.sum(-1 ) ): lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowercase : int = padding_value normed_input_values.append(_A ) else: lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowercase : Optional[Any] = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): lowercase : List[Any] = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase : List[str] = [np.asarray([raw_speech] ).T] lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding lowercase : str = self.pad( _A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowercase : Tuple = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]] if isinstance(input_features[0] , _A ): lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features] else: lowercase : Optional[int] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: lowercase : Any = padded_inputs.convert_to_tensors(_A ) return padded_inputs def __a ( self : Optional[Any] ) -> Dict[str, Any]: """simple docstring""" lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Dict = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
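# Usage sketch for the feature extractor above, assuming it corresponds to
# upstream WhisperFeatureExtractor (its defaults match: 80 mel bins, 30 s
# chunks, hop length 160). One second of silence is padded out to 30 s of
# log-mel features; the shape below is an expectation, not a guarantee.
import numpy as np
from transformers import WhisperFeatureExtractor  # assumed upstream name

extractor = WhisperFeatureExtractor()
inputs = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
print(inputs.input_features.shape)  # expected: (1, 80, 3000)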
def snake_case( __magic_name__ , __magic_name__ ) -> float: '''simple docstring''' return price * (1 + tax_rate) if __name__ == "__main__": print(f'''{price_plus_tax(1_00, 0.2_5) = }''') print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class _A ( _lowerCamelCase ): _UpperCamelCase : Union[List[PIL.Image.Image], np.ndarray] _UpperCamelCase : Optional[List[bool]] _UpperCamelCase : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _A ( _lowerCamelCase ): def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = tokenizer lowercase : List[Any] = tokenizer.bos_token_id lowercase : Union[str, Any] = dataset lowercase : Union[str, Any] = seq_length lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences def __iter__( self : int ) -> int: """simple docstring""" lowercase : Dict = iter(self.dataset ) lowercase : Union[str, Any] = True while more_examples: lowercase , lowercase : Tuple = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(_A )['''content'''] ) buffer_len += len(buffer[-1] ) except StopIteration: lowercase : List[str] = False break lowercase : str = tokenizer(_A , truncation=_A )['''input_ids'''] lowercase : List[str] = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(_A ) , self.seq_length ): lowercase : int = all_token_ids[i : i + self.seq_length] if len(_A ) == self.seq_length: yield torch.tensor(_A ) def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : List[str] = {'''streaming''': True} lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ ) lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length ) lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size ) return eval_dataloader def snake_case( __magic_name__ ) -> str: '''simple docstring''' model.eval() lowercase : str = [] for step, batch in enumerate(__magic_name__ ): with torch.no_grad(): lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ ) lowercase : List[Any] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__magic_name__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) ) try: lowercase : Tuple = torch.exp(__magic_name__ ) except OverflowError: lowercase : List[str] = float('''inf''' ) return loss.item(), perplexity.item() # Setup Accelerator lowerCAmelCase_ = Accelerator() # Parse configuration lowerCAmelCase_ = HfArgumentParser(EvaluationArguments) lowerCAmelCase_ = parser.parse_args() set_seed(args.seed) # Logging lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowerCAmelCase_ = create_dataloader(args) # Prepare everything with our `accelerator`. lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args) logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
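# The evaluate() loop above reduces to perplexity = exp(mean cross-entropy
# loss); a tiny self-contained illustration with plain floats:
import math

losses = [2.0, 2.5, 3.0]
mean_loss = sum(losses) / len(losses)  # 2.5
print(math.exp(mean_loss))             # ~12.18, the perplexity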
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class _A ( datasets.BeamBasedBuilder ): def __a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo( features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , ) def __a ( self : Union[str, Any] , _A : Any , _A : Tuple ) -> Tuple: """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )] def __a ( self : Optional[Any] , _A : Any , _A : List[str] ) -> int: """simple docstring""" import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_A ) class _A ( datasets.BeamBasedBuilder ): def __a ( self : Dict ) -> List[str]: """simple docstring""" return datasets.DatasetInfo( features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , ) def __a ( self : Optional[int] , _A : Optional[Any] , _A : Union[str, Any] ) -> str: """simple docstring""" return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} ) ] def __a ( self : str , _A : Optional[int] , _A : Any ) -> Union[str, Any]: """simple docstring""" import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_A ) def snake_case( ) -> Optional[Any]: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] def snake_case( ) -> Tuple: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] class _A ( _lowerCamelCase ): @require_beam def __a ( self : Tuple ) -> Any: """simple docstring""" lowercase : Tuple = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: lowercase : Any = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) lowercase : Dict = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A ) self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" import apache_beam as beam lowercase : Union[str, Any] = beam.io.parquetio.WriteToParquet lowercase : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: lowercase : Optional[Any] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock: lowercase : Dict = partial(_A , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" 
) ) ) self.assertTrue( os.path.exists( os.path.join( _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) lowercase : Union[str, Any] = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_cache_dir: lowercase : Dict = DummyBeamDataset(cache_dir=_A ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Tuple ) -> List[Any]: """simple docstring""" lowercase : Optional[Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: lowercase : str = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) ) lowercase : Optional[Any] = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A ) self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> Optional[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> int: '''simple docstring''' lowercase : List[str] = '''mock-s3-bucket''' lowercase : Optional[int] = F"""s3://{mock_bucket}""" lowercase : List[Any] = extract_path_from_uri(__magic_name__ ) assert dataset_path.startswith('''s3://''' ) is False lowercase : Optional[int] = '''./local/path''' lowercase : Dict = extract_path_from_uri(__magic_name__ ) assert dataset_path == new_dataset_path def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Tuple = is_remote_filesystem(__magic_name__ ) assert is_remote is True lowercase : int = fsspec.filesystem('''file''' ) lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} lowercase : List[Any] = input_paths[compression_fs_class.protocol] if input_path is None: lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__magic_name__ ) lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ ) assert isinstance(__magic_name__ , __magic_name__ ) lowercase : List[Any] = os.path.basename(__magic_name__ ) lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} lowercase : List[str] = compressed_file_paths[protocol] lowercase : str = '''dataset.jsonl''' lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}""" lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ ) assert fs.isfile(__magic_name__ ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ ) lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert 
hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(__magic_name__ ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def snake_case( ) -> List[Any]: '''simple docstring''' lowercase : List[Any] = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ ) with pytest.warns(__magic_name__ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(__magic_name__ ) == 1 assert ( str(warning_info[0].message ) == F"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
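# Usage sketch for the compression filesystems exercised above: fsspec can
# decompress transparently on read. The file path is hypothetical; gzip
# support ships with fsspec itself.
import fsspec

with fsspec.open("data/dataset.jsonl.gz", "rt", compression="gzip") as f:
    first_line = f.readline()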
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _A ( unittest.TestCase ): _UpperCamelCase : List[str] = MODEL_FOR_CAUSAL_LM_MAPPING _UpperCamelCase : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __a ( self : List[str] ) -> str: """simple docstring""" lowercase : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowercase : Tuple = text_generator('''This is a test''' , do_sample=_A ) self.assertEqual( _A , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowercase : Optional[int] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( _A , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) lowercase : str = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A ) self.assertEqual( _A , [ {'''generated_token_ids''': ANY(_A )}, {'''generated_token_ids''': ANY(_A )}, ] , ) lowercase : List[Any] = text_generator.model.config.eos_token_id lowercase : Dict = '''<pad>''' lowercase : Union[str, Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , ) self.assertEqual( _A , [ [ {'''generated_token_ids''': ANY(_A )}, {'''generated_token_ids''': ANY(_A )}, ], [ {'''generated_token_ids''': ANY(_A )}, {'''generated_token_ids''': ANY(_A )}, ], ] , ) @require_tf def __a ( self : Any ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowercase : List[Any] = text_generator('''This is a test''' , do_sample=_A ) self.assertEqual( _A , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowercase : Optional[Any] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A ) self.assertEqual( _A , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __a ( self : Optional[Any] , _A : Optional[int] , _A : str , _A : Tuple ) -> List[Any]: """simple docstring""" lowercase : Optional[Any] = TextGenerationPipeline(model=_A , tokenizer=_A ) return text_generator, ["This is a test", "Another test"] def __a ( self : Optional[int] ) -> Any: """simple docstring""" lowercase : Union[str, Any] = '''Hello I believe in''' lowercase : List[str] 
= pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowercase : Optional[Any] = text_generator(_A ) self.assertEqual( _A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowercase : str = text_generator(_A , stop_sequence=''' fe''' ) self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __a ( self : Tuple , _A : List[str] , _A : List[Any] ) -> List[Any]: """simple docstring""" lowercase : Tuple = text_generator.model lowercase : Optional[Any] = text_generator.tokenizer lowercase : int = text_generator('''This is a test''' ) self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowercase : Tuple = text_generator('''This is a test''' , return_full_text=_A ) self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowercase : List[Any] = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A ) lowercase : Tuple = text_generator('''This is a test''' ) self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowercase : Optional[int] = text_generator('''This is a test''' , return_full_text=_A ) self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowercase : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A ) self.assertEqual( _A , [ [{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}], [{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowercase : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A ) self.assertEqual( _A , [ [{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}], [{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}], ] , ) with self.assertRaises(_A ): lowercase : List[Any] = text_generator('''test''' , return_full_text=_A , return_text=_A ) with self.assertRaises(_A ): lowercase : List[str] = text_generator('''test''' , return_full_text=_A , return_tensors=_A ) with self.assertRaises(_A ): lowercase : List[str] = text_generator('''test''' , return_text=_A , return_tensors=_A ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowercase : Tuple = text_generator('''''' ) self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowercase : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
lowercase : Any = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 10_000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowercase : List[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(_A ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __a ( self : Any ) -> Union[str, Any]: """simple docstring""" import torch # Classic `model_kwargs` lowercase : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase : Any = pipe('''This is a test''' ) self.assertEqual( _A , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowercase : Optional[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase : List[Any] = pipe('''This is a test''' ) self.assertEqual( _A , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowercase : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowercase : Any = pipe('''This is a test''' ) self.assertEqual( _A , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __a ( self : List[Any] ) -> List[str]: """simple docstring""" import torch lowercase : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" import torch lowercase : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=_A , top_p=0.5 ) def __a ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase : Union[str, Any] = '''Hello world''' lowercase : List[str] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowercase : int = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowercase : str = 
logging.get_logger('''transformers.generation.utils''' ) lowercase : Optional[int] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(_A ) as cl: lowercase : List[Any] = text_generator(_A , max_length=10 , max_new_tokens=1 ) self.assertIn(_A , cl.out ) # The user only sets one -> no warning with CaptureLogger(_A ) as cl: lowercase : Optional[Any] = text_generator(_A , max_new_tokens=1 ) self.assertNotIn(_A , cl.out ) with CaptureLogger(_A ) as cl: lowercase : Any = text_generator(_A , max_length=10 ) self.assertNotIn(_A , cl.out )
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) class _A ( enum.Enum ): _UpperCamelCase : Union[str, Any] = 0 _UpperCamelCase : Any = 1 @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[Any] = '''generated''' def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]: """simple docstring""" super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]: """simple docstring""" lowercase : str = {} if truncation is not None: lowercase : Tuple = truncation lowercase : Tuple = generate_kwargs lowercase : Optional[Any] = {} if return_tensors is not None and return_type is None: lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: lowercase : Dict = return_type if clean_up_tokenization_spaces is not None: lowercase : Dict = clean_up_tokenization_spaces if stop_sequence is not None: lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) lowercase : List[str] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" return True def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict: """simple docstring""" lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _A ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) lowercase : List[Any] = ([prefix + arg for arg in args[0]],) lowercase : Dict = True elif isinstance(args[0] , _A ): lowercase : Optional[int] = (prefix + args[0],) lowercase : Union[str, Any] = False else: raise ValueError( f""" `args[0]`: {args[0]} have the wrong format. 
They should be either of type `str` or type `list`""" ) lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = super().__call__(*_A , **_A ) if ( isinstance(args[0] , _A ) and all(isinstance(_A , _A ) for el in args[0] ) and all(len(_A ) == 1 for res in result ) ): return [res[0] for res in result] return result def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A ) return inputs def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any: """simple docstring""" if self.framework == "pt": lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape elif self.framework == "tf": lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy() lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length ) lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) lowercase : int = self.model.generate(**_A , **_A ) lowercase : int = output_ids.shape[0] if self.framework == "pt": lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple: """simple docstring""" lowercase : Any = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: lowercase : Dict = { f"""{self.return_name}_text""": self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) } records.append(_A ) return records @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''summary''' def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]: """simple docstring""" return super().__call__(*_A , **_A ) def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool: """simple docstring""" if max_length < min_length: logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f"""consider decreasing max_length manually, e.g.
summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''translation''' def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict: """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ): return self.tokenizer._build_translation_inputs( *_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A ) else: return super()._parse_and_tokenize(*_A , truncation=_A ) def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]: """simple docstring""" lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A ) if src_lang is not None: lowercase : Optional[Any] = src_lang if tgt_lang is not None: lowercase : Dict = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. lowercase : Dict = kwargs.get('''task''' , self.task ) lowercase : List[str] = task.split('''_''' ) if task and len(_A ) == 4: # translation, XX, to YY lowercase : Any = items[1] lowercase : List[str] = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]: """simple docstring""" return super().__call__(*_A , **_A )
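# Usage sketch: the summarization and translation pipelines above are normally
# reached through the `pipeline` factory. The model name is illustrative, not
# prescribed by this file; `summary_text` is the pipeline's standard output key.
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
result = summarizer("Long input text to be condensed. " * 20, max_length=30, min_length=5)
print(result[0]["summary_text"])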
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'post_extract_proj': 'feature_projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.upsample.0': 'encoder.upsample.projection', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' for attribute in key.split('''.''' ): lowercase : Tuple = getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Tuple = getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase : List[str] = value elif weight_type == "weight_g": lowercase : Union[str, Any] = value elif weight_type == "weight_v": lowercase : str = value elif weight_type == "bias": lowercase : Any = value else: lowercase : Any = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> str: '''simple docstring''' lowercase : Tuple = [] lowercase : Tuple = fairseq_model.state_dict() lowercase : str = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase : List[Any] = False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): lowercase : List[str] = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] = True if "*" in mapped_key: lowercase : Optional[Any] = name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : Optional[int] = mapped_key.replace('''*''' , __magic_name__ ) if "weight_g" in name: lowercase : Optional[int] = '''weight_g''' elif "weight_v" in name: lowercase : List[Any] = '''weight_v''' elif "weight" in name: lowercase : Optional[int] = '''weight''' elif "bias" in name: lowercase : List[str] = '''bias''' else: lowercase : Any = None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[int] = full_name.split('''conv_layers.''' )[-1] lowercase : Optional[int] = name.split('''.''' ) lowercase : Tuple = int(items[0] ) lowercase : int = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase : Union[str, Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowercase : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase : int = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__magic_name__ ) def snake_case( __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : Tuple = SEWConfig() if is_finetuned: lowercase : Any = model.wav_encoder.wav_model.cfg else: lowercase : Dict = model.cfg lowercase : Dict = fs_config.conv_bias lowercase : Dict = eval(fs_config.conv_feature_layers ) lowercase : List[str] = [x[0] for x in conv_layers] lowercase : List[str] = [x[1] for x in conv_layers] lowercase : int = [x[2] for x in conv_layers] lowercase : Tuple = '''gelu''' lowercase : Optional[Any] = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' lowercase : List[Any] = 0.0 lowercase : int = fs_config.activation_fn.name lowercase : List[Any] = fs_config.encoder_embed_dim lowercase : str = 0.0_2 lowercase : int = fs_config.encoder_ffn_embed_dim lowercase : Tuple = 1e-5 lowercase : Tuple = fs_config.encoder_layerdrop lowercase : List[str] = fs_config.encoder_attention_heads lowercase : Tuple = fs_config.conv_pos_groups lowercase : List[str] = fs_config.conv_pos lowercase : str = len(__magic_name__ ) lowercase : int = fs_config.encoder_layers lowercase : List[str] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: lowercase : int = model.cfg lowercase : str = fs_config.final_dropout lowercase : Union[str, Any] = fs_config.layerdrop lowercase : Union[str, Any] = fs_config.activation_dropout lowercase : Union[str, Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 lowercase : List[str] = fs_config.attention_dropout lowercase : List[str] = fs_config.dropout_input lowercase : Optional[int] = fs_config.dropout lowercase : List[str] = fs_config.mask_channel_length lowercase : Tuple = fs_config.mask_channel_prob lowercase : Union[str, Any] = fs_config.mask_length lowercase : Union[str, Any] = fs_config.mask_prob lowercase : str = '''Wav2Vec2FeatureExtractor''' lowercase : Optional[int] = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def snake_case( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=True ) -> Optional[Any]: '''simple docstring''' if is_finetuned: lowercase , lowercase , lowercase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: lowercase , lowercase , lowercase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: lowercase : Any = SEWConfig.from_pretrained(__magic_name__ ) else: lowercase : List[str] = convert_config(model[0] , __magic_name__ ) lowercase : Optional[int] = model[0].eval() lowercase : int = True if config.feat_extract_norm == '''layer''' else False lowercase : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) if is_finetuned: if dict_path: lowercase : Tuple = 
Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : Any = target_dict.pad_index lowercase : List[Any] = target_dict.bos_index lowercase : Union[str, Any] = target_dict.pad_index lowercase : Tuple = target_dict.bos_index lowercase : str = target_dict.eos_index lowercase : str = len(target_dict.symbols ) lowercase : Optional[Any] = os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , __magic_name__ ) lowercase : Dict = WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : int = WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : Optional[Any] = SEWForCTC(__magic_name__ ) else: lowercase : Optional[int] = SEWModel(__magic_name__ ) feature_extractor.save_pretrained(__magic_name__ ) recursively_load_weights(__magic_name__ , __magic_name__ , __magic_name__ ) hf_model.save_pretrained(__magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
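# --- Editor's note: a hypothetical invocation sketch for the converter above (added).
# The script filename and all paths are placeholders, not shipped artifacts.
#
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/sew_model.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path /path/to/fairseq/dict.ltr.txt \
#       --is_finetuned
#
# Without --is_finetuned the script exports only the pretrained acoustic model (SEWModel);
# with it, a CTC model plus tokenizer/processor files are written alongside the weights.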
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase_ = get_logger(__name__) class _A : _UpperCamelCase : int = '''dummy_data''' _UpperCamelCase : Tuple = '''datasets''' _UpperCamelCase : Optional[int] = False def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict: """simple docstring""" lowercase : Tuple = 0 lowercase : List[Any] = dataset_name lowercase : int = cache_dir lowercase : str = use_local_dummy_data lowercase : Union[str, Any] = config # download_callbacks take a single url as input lowercase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root lowercase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general lowercase : Union[str, Any] = str(_A ) # to be downloaded lowercase : Tuple = None lowercase : Optional[int] = None @property def __a ( self : str ) -> Dict: """simple docstring""" if self._dummy_file is None: lowercase : Optional[Any] = self.download_dummy_data() return self._dummy_file @property def __a ( self : int ) -> Optional[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name ) @property def __a ( self : List[Any] ) -> int: """simple docstring""" return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' ) def __a ( self : str ) -> int: """simple docstring""" lowercase : str = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) lowercase : List[str] = cached_path( _A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A ) return os.path.join(_A , self.dummy_file_name ) @property def __a ( self : str ) -> Tuple: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def __a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" if self._bucket_url is None: lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) ) return self._bucket_url @property def __a ( self : Tuple ) -> List[str]: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] ) def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested lowercase : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned lowercase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(_A , _A ): return self.create_dummy_data_dict(_A , _A ) elif isinstance(_A , (list, tuple) ): return self.create_dummy_data_list(_A , _A ) else: return self.create_dummy_data_single(_A , _A ) def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]: """simple docstring""" return path def __a ( self : List[str] ) -> str: """simple docstring""" return {} def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase : Any = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(_A , _A ): for single_url in single_urls: download_callback(_A ) else: lowercase : List[str] = single_urls download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(_A , _A ): lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls] else: lowercase : int = single_urls lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) lowercase : str = value # make sure that values are unique if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique lowercase : str = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple: """simple docstring""" lowercase : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url ) lowercase : str = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): lowercase : List[str] = [data_url[0]] * len(_A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) ) dummy_data_list.append(_A ) return dummy_data_list def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them 
with urllib.parse.quote_plus lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) ) if os.path.exists(_A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def __a ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def __a ( self : Any ) -> Dict: """simple docstring""" pass def __a ( self : int , _A : Optional[Any] ) -> Dict: """simple docstring""" def _iter_archive_members(_A : Optional[int] ): # this preserves the order of the members inside the ZIP archive lowercase : int = Path(self.dummy_file ).parent lowercase : List[str] = path.relative_to(_A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: lowercase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(_A ) lowercase : Tuple = Path(_A ) lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ): yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' ) def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]: """simple docstring""" if not isinstance(_A , _A ): lowercase : Dict = [paths] for path in paths: if os.path.isfile(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): return yield path else: for dirpath, dirnames, filenames in os.walk(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): continue dirnames.sort() for filename in sorted(_A ): if filename.startswith(('''.''', '''__''') ): continue yield os.path.join(_A , _A )
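# --- Editor's note (added): the class above mirrors the `datasets` library's
# MockDownloadManager, which test suites use so that dl_manager.download_and_extract()
# resolves URLs to files packed inside a local dummy_data.zip instead of hitting the
# network. A hedged sketch of the intended use; the import path and argument order are
# assumptions based on the constructor's assignments (dataset name, config, version, ...):
#
#   from datasets.utils.mock_download_manager import MockDownloadManager  # path is an assumption
#   dl_manager = MockDownloadManager("my_dataset", my_config, "1.0.0", use_local_dummy_data=True)
#   path = dl_manager.download_and_extract("https://example.com/train.json")
#   # -> a local path inside the extracted dummy_data.zip rather than a real download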
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
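# --- Editor's note: a short migration sketch (added). The alias above keeps old code
# working but emits a FutureWarning; new code should build the processor directly:
#
#   from transformers import LayoutLMv2ImageProcessor
#   image_processor = LayoutLMv2ImageProcessor(apply_ocr=True)  # apply_ocr per the real API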
def bfs(graph, source, sink, parent):
    """Breadth-first search over the residual graph; records the augmenting path in `parent`."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Ford-Fulkerson with BFS path selection (the Edmonds-Karp variant)."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update the residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
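# --- Editor's note: an illustrative sanity check (added; not part of the original file).
# With BFS path selection this is Edmonds-Karp, which runs in O(V * E^2). The network
# above is the classic CLRS example, whose maximum flow is 23. ford_fulkerson mutates
# its input into the residual graph, hence the fresh literal here.
assert (
    ford_fulkerson(
        [
            [0, 16, 13, 0, 0, 0],
            [0, 0, 10, 12, 0, 0],
            [0, 4, 0, 0, 14, 0],
            [0, 0, 9, 0, 0, 20],
            [0, 0, 0, 7, 0, 4],
            [0, 0, 0, 0, 0, 0],
        ],
        0,
        5,
    )
    == 23
)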
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation for a minimum vertex cover: repeatedly pick the vertex
    that currently covers the most uncovered edges."""
    # for each node and its adjacency list, add them and the rank of the node to the queue;
    # heapq implements a min priority queue, so -1 * len(v) makes it behave like a max queue
    queue: list[list] = []
    for key, value in graph.items():  # O(log(n)) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if this vertex has no remaining adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
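# --- Editor's note: an illustrative validity check (added; not part of the original file).
# The greedy heuristic gives no minimality guarantee, but its output must touch every
# edge. The function mutates the adjacency lists it is given, so we pass it a copy.
_example_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
_cover = greedy_min_vertex_cover({k: list(v) for k, v in _example_graph.items()})
assert all(u in _cover or v in _cover for u in _example_graph for v in _example_graph[u])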
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'vocab.txt'} lowerCAmelCase_ = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase_ = { 'openbmb/cpm-ant-10b': 10_24, } def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : Optional[int] = collections.OrderedDict() with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader: lowercase : str = reader.readlines() for index, token in enumerate(__magic_name__ ): lowercase : Union[str, Any] = token.rstrip('''\n''' ) lowercase : List[Any] = index return vocab class _A ( _lowerCamelCase ): def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = vocab lowercase : List[str] = unk_token lowercase : Any = max_input_chars_per_word def __a ( self : List[str] , _A : Tuple ) -> str: """simple docstring""" lowercase : Dict = list(_A ) if len(_A ) > self.max_input_chars_per_word: return [self.unk_token] lowercase : int = 0 lowercase : Dict = [] while start < len(_A ): lowercase : Optional[Any] = len(_A ) lowercase : List[str] = None while start < end: lowercase : List[Any] = ''''''.join(chars[start:end] ) if substr in self.vocab: lowercase : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_A ) lowercase : Dict = end return sub_tokens class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = VOCAB_FILES_NAMES _UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : int = False def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple: """simple docstring""" requires_backends(self , ['''jieba'''] ) super().__init__( bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , ) lowercase : str = bod_token lowercase : str = eod_token lowercase : Any = load_vocab(_A ) lowercase : List[Any] = self.encoder[space_token] lowercase : Tuple = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) lowercase : int = {v: k for k, v in self.encoder.items()} lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def __a ( self : Dict ) -> Optional[int]: """simple docstring""" return self.encoder[self.bod_token] @property def __a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return self.encoder[self.eod_token] @property def __a ( self : List[str] ) -> List[str]: """simple docstring""" return self.encoder["\n"] @property def __a ( self : List[Any] ) -> int: """simple docstring""" return len(self.encoder ) def __a ( self : Union[str, Any] ) -> 
Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : str , _A : List[str] ) -> Tuple: """simple docstring""" lowercase : int = [] for x in jieba.cut(_A , cut_all=_A ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) ) return output_tokens def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any: """simple docstring""" lowercase : List[str] = [i for i in token_ids if i >= 0] lowercase : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_A , **_A ) def __a ( self : List[Any] , _A : int ) -> Optional[Any]: """simple docstring""" return token in self.encoder def __a ( self : Dict , _A : List[str] ) -> str: """simple docstring""" return "".join(_A ) def __a ( self : List[str] , _A : List[str] ) -> Any: """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple: """simple docstring""" return self.decoder.get(_A , self.unk_token ) def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if os.path.isdir(_A ): lowercase : str = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory lowercase : Any = 0 if " " in self.encoder: lowercase : List[Any] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: lowercase : Dict = self.encoder['''\n'''] del self.encoder["\n"] lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) with open(_A , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) lowercase : Any = token_index writer.write(token + '''\n''' ) index += 1 return (vocab_file,) def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) return [1] + ([0] * len(_A ))
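# --- Editor's note: a hedged usage sketch (added). The tokenizer above mirrors
# transformers' CpmAntTokenizer (jieba-based pre-tokenization followed by WordPiece);
# `pip install jieba` is required, and the checkpoint name comes from the vocab map above.
#
#   from transformers import CpmAntTokenizer
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tokenizer("今天天气真好!")["input_ids"]
#   text = tokenizer.decode(ids)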
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'microsoft/unispeech-sat-base-100h-libri-ft': ( 'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class _A ( _lowerCamelCase ): _UpperCamelCase : Optional[int] = '''unispeech-sat''' def __init__( self : Tuple , _A : List[str]=32 , _A : str=768 , _A : Optional[int]=12 , _A : List[Any]=12 , _A : int=3_072 , _A : Dict="gelu" , _A : int=0.1 , _A : Optional[int]=0.1 , _A : str=0.1 , _A : str=0.0 , _A : Tuple=0.0 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : int=0.02 , _A : List[str]=1E-5 , _A : Tuple="group" , _A : Optional[int]="gelu" , _A : List[Any]=(512, 512, 512, 512, 512, 512, 512) , _A : Dict=(5, 2, 2, 2, 2, 2, 2) , _A : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , _A : List[str]=False , _A : Optional[int]=128 , _A : int=16 , _A : List[Any]=False , _A : Optional[int]=True , _A : int=0.05 , _A : Any=10 , _A : int=2 , _A : List[str]=0.0 , _A : str=10 , _A : Optional[int]=0 , _A : str=320 , _A : Union[str, Any]=2 , _A : Any=0.1 , _A : Tuple=100 , _A : Any=256 , _A : List[Any]=256 , _A : Dict=0.1 , _A : Any="mean" , _A : List[str]=False , _A : Union[str, Any]=False , _A : Optional[int]=256 , _A : Optional[Any]=(512, 512, 512, 512, 1_500) , _A : List[str]=(5, 3, 3, 1, 1) , _A : Union[str, Any]=(1, 2, 3, 1, 1) , _A : List[str]=512 , _A : str=0 , _A : Optional[int]=1 , _A : Dict=2 , _A : str=504 , **_A : Union[str, Any] , ) -> int: """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A ) lowercase : List[Any] = hidden_size lowercase : int = feat_extract_norm lowercase : Union[str, Any] = feat_extract_activation lowercase : List[Any] = list(_A ) lowercase : str = list(_A ) lowercase : Optional[int] = list(_A ) lowercase : Optional[Any] = conv_bias lowercase : str = num_conv_pos_embeddings lowercase : Any = num_conv_pos_embedding_groups lowercase : Optional[Any] = len(self.conv_dim ) lowercase : Optional[int] = num_hidden_layers lowercase : List[Any] = intermediate_size lowercase : Any = hidden_act lowercase : Any = num_attention_heads lowercase : int = hidden_dropout lowercase : Tuple = attention_dropout lowercase : Optional[int] = activation_dropout lowercase : List[str] = feat_proj_dropout lowercase : Tuple = final_dropout lowercase : Union[str, Any] = layerdrop lowercase : int = layer_norm_eps lowercase : List[str] = initializer_range lowercase : List[str] = vocab_size lowercase : int = num_clusters lowercase : int = do_stable_layer_norm lowercase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase : List[Any] = apply_spec_augment lowercase : Optional[Any] = mask_time_prob lowercase : Any = mask_time_length lowercase : Union[str, Any] = mask_time_min_masks lowercase : Union[str, Any] = mask_feature_prob lowercase : Tuple = mask_feature_length lowercase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase : Any = num_codevectors_per_group lowercase : Any = num_codevector_groups lowercase : List[str] = contrastive_logits_temperature lowercase : str = feat_quantizer_dropout lowercase : Optional[int] = num_negatives lowercase : Tuple = codevector_dim lowercase : List[str] = proj_codevector_dim lowercase : Optional[Any] = diversity_loss_weight # ctc loss lowercase : Optional[int] = ctc_loss_reduction lowercase : str = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase : Any = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase : List[Any] = list(_A ) lowercase : Dict = list(_A ) lowercase : Optional[Any] = list(_A ) lowercase : str = xvector_output_dim @property def __a ( self : List[str] ) -> List[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
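# --- Editor's note: a hedged instantiation sketch (added). The class above mirrors
# transformers' UniSpeechSatConfig; argument and property names follow that real API.
#
#   from transformers import UniSpeechSatConfig, UniSpeechSatModel
#   config = UniSpeechSatConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
#   model = UniSpeechSatModel(config)
#   config.inputs_to_logits_ratio   # product of the conv strides: 5*2*2*2*2*2*2 = 320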
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION-400M kNN index for images matching `class_prompt` and download
    them (with captions and URLs) into `class_data_dir`."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # enlarge the query until enough hits come back (capped at 10k candidates)
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
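# --- Editor's note: a hypothetical invocation (added); requires `pip install clip-retrieval`,
# and the script name and paths are placeholders.
#
#   python retrieve.py \
#       --class_prompt "photo of a cat" \
#       --class_data_dir ./real_reg/samples_cat \
#       --num_class_images 200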
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Tuple = KandinskyVaaInpaintPipeline _UpperCamelCase : str = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] _UpperCamelCase : int = [ '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] _UpperCamelCase : Any = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _UpperCamelCase : Tuple = False @property def __a ( self : Optional[int] ) -> Tuple: """simple docstring""" return 32 @property def __a ( self : Dict ) -> Tuple: """simple docstring""" return 32 @property def __a ( self : Any ) -> Dict: """simple docstring""" return self.time_input_dim @property def __a ( self : int ) -> Tuple: """simple docstring""" return self.time_input_dim * 4 @property def __a ( self : Optional[int] ) -> List[Any]: """simple docstring""" return 100 @property def __a ( self : str ) -> int: """simple docstring""" torch.manual_seed(0 ) lowercase : Tuple = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowercase : Any = UNetaDConditionModel(**_A ) return model @property def __a ( self : List[str] ) -> str: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __a ( self : Any ) -> str: """simple docstring""" torch.manual_seed(0 ) lowercase : Any = VQModel(**self.dummy_movq_kwargs ) return model def __a ( self : Tuple ) -> str: """simple docstring""" lowercase : Optional[int] = self.dummy_unet lowercase : Dict = self.dummy_movq lowercase : Dict = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_A , ) lowercase : Dict = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __a ( self : 
List[Any] , _A : Optional[Any] , _A : List[str]=0 ) -> Tuple: """simple docstring""" lowercase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A ) lowercase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _A ) # create init_image lowercase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A ) lowercase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase : List[Any] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((256, 256) ) # create mask lowercase : List[Any] = np.ones((64, 64) , dtype=np.floataa ) lowercase : str = 0 if str(_A ).startswith('''mps''' ): lowercase : Union[str, Any] = torch.manual_seed(_A ) else: lowercase : List[str] = torch.Generator(device=_A ).manual_seed(_A ) lowercase : List[Any] = { '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def __a ( self : Any ) -> int: """simple docstring""" lowercase : Tuple = '''cpu''' lowercase : List[str] = self.get_dummy_components() lowercase : List[Any] = self.pipeline_class(**_A ) lowercase : List[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) lowercase : Any = pipe(**self.get_dummy_inputs(_A ) ) lowercase : Dict = output.images lowercase : Dict = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] lowercase : Dict = image[0, -3:, -3:, -1] lowercase : int = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) lowercase : Optional[Any] = np.array( [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _A ( unittest.TestCase ): def __a ( self : Tuple ) -> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : str ) -> int: """simple docstring""" lowercase : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' ) lowercase : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowercase : str = np.ones((768, 768) , dtype=np.floataa ) lowercase : List[str] = 0 lowercase : Union[str, Any] = '''a hat''' lowercase : List[Any] = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_A ) lowercase : List[str] = KandinskyVaaInpaintPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa ) lowercase : int = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) lowercase : Union[str, Any] = torch.Generator(device='''cpu''' 
).manual_seed(0 ) lowercase , lowercase : str = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowercase : str = pipeline( image=_A , mask_image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , ) lowercase : int = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_A , _A )
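# --- Editor's note (added): how a test module like this is typically run; the path is an
# assumption about the repository layout.
#
#   python -m pytest tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
#
# The @slow + @require_torch_gpu integration test at the bottom is skipped unless
# RUN_SLOW=1 is set and a CUDA device is available.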
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the launcher's own arguments plus everything destined for the wrapped script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
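# --- Editor's note: an illustrative launch command (added). The wrapped script and its
# flags are placeholders; --num_cores is defined above, and the wrapped module must
# expose an `_mp_fn(index)` entry point for xmp.spawn.
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train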
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _A ( _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = DebertaVaTokenizer _UpperCamelCase : Tuple = DebertaVaTokenizerFast _UpperCamelCase : int = True _UpperCamelCase : List[str] = True def __a ( self : Tuple ) -> Tuple: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase : Tuple = DebertaVaTokenizer(_A , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self : Any , _A : Any ) -> Any: """simple docstring""" lowercase : Tuple = '''this is a test''' lowercase : Union[str, Any] = '''this is a test''' return input_text, output_text def __a ( self : Union[str, Any] ) -> Any: """simple docstring""" lowercase : List[Any] = '''<pad>''' lowercase : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __a ( self : str ) -> List[Any]: """simple docstring""" lowercase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(_A ) , 30_001 ) def __a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def __a ( self : List[str] ) -> Dict: """simple docstring""" lowercase : Optional[Any] = ''' \tHeLLo!how \n Are yoU? 
''' lowercase : Tuple = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase : Any = DebertaVaTokenizer(_A , do_lower_case=_A ) lowercase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowercase : List[str] = DebertaVaTokenizerFast(_A , do_lower_case=_A ) lowercase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def __a ( self : List[Any] ) -> Dict: """simple docstring""" pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def __a ( self : Tuple ) -> List[Any]: """simple docstring""" pass def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase : List[Any] = '''I was born in 92000, and this is falsé.''' lowercase : Any = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase : Union[str, Any] = DebertaVaTokenizer(_A , split_by_punct=_A ) lowercase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowercase : List[str] = DebertaVaTokenizerFast(_A , split_by_punct=_A ) lowercase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __a ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase : List[str] = '''I was born in 92000, and this is falsé.''' lowercase : List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase : Tuple = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowercase : Optional[int] = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowercase : Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowercase : Optional[int] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase : Any = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowercase : Tuple = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __a ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowercase : Tuple = '''I was born in 92000, and this is falsé.''' lowercase : Any = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', 
'''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase : Dict = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowercase : Tuple = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : List[str] = ''' \tHeLLo!how \n Are yoU? ''' lowercase : Optional[Any] = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase : Optional[int] = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowercase : Union[str, Any] = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __a ( self : Dict ) -> str: """simple docstring""" lowercase : str = self.get_tokenizer() lowercase : Optional[int] = self.get_rust_tokenizer() lowercase : List[str] = '''I was born in 92000, and this is falsé.''' lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowercase : Optional[Any] = tokenizer.encode(_A , add_special_tokens=_A ) lowercase : int = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowercase : int = self.get_rust_tokenizer() lowercase : str = tokenizer.encode(_A ) lowercase : List[str] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def __a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase : Tuple = '''This is a test''' lowercase : Optional[Any] = [13, 1, 4_398, 25, 21, 1_289] lowercase : Dict = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase : Optional[Any] = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase : Optional[Any] = DebertaVaTokenizer(_A , keep_accents=_A ) lowercase : Union[str, Any] = DebertaVaTokenizerFast(_A , keep_accents=_A ) lowercase : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowercase : int = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowercase : List[Any] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) lowercase : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowercase : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowercase : str = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) # fmt: off lowercase : List[str] = '''I was born in 92000, and this is falsé.''' lowercase : str = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] lowercase : int = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase : Any = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', 
'''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase : Dict = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowercase : Dict = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) lowercase : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowercase : Tuple = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) def __a ( self : List[str] ) -> List[Any]: """simple docstring""" lowercase : Any = DebertaVaTokenizer(_A ) lowercase : Any = tokenizer.encode('''sequence builders''' ) lowercase : int = tokenizer.encode('''multi-sequence build''' ) lowercase : str = tokenizer.build_inputs_with_special_tokens(_A ) lowercase : str = tokenizer.build_inputs_with_special_tokens(_A , _A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , ) @slow def __a ( self : Optional[Any] ) -> Tuple: """simple docstring""" lowercase : List[str] = {'''input_ids''': [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
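# --- Editor's note (added): typical invocation for this test module; the path is an
# assumption about the repository layout. The @slow integration check above only runs
# with RUN_SLOW=1.
#
#   RUN_SLOW=1 python -m pytest tests/models/deberta_v2/test_tokenization_deberta_v2.py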
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def snake_case( __magic_name__ ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__magic_name__ ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class _A ( _lowerCamelCase ): _UpperCamelCase : str = ['''pixel_values'''] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None: """simple docstring""" super().__init__(**_A ) lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224} lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' ) lowercase : List[str] = do_resize lowercase : Optional[Any] = size lowercase : List[str] = do_center_crop lowercase : List[Any] = crop_size lowercase : str = resample lowercase : Tuple = do_rescale lowercase : Any = rescale_factor lowercase : Tuple = do_normalize lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" in size: lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A ) elif "height" in size and "width" in size: lowercase : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}"""
            )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single video frame."""
        # Parenthesize so the resample check only fires when do_resize is requested.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a video (or a batch of videos) into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
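
# Usage sketch for the processor above. Its class name is cropped out of this
# excerpt; VideoMAEImageProcessor is an assumption based on the `make_batched`
# video handling, and any processor exposing this `preprocess` signature is
# driven the same way.
import numpy as np
from transformers import VideoMAEImageProcessor

processor = VideoMAEImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
# A "video" is a list of frames; each frame is an (H, W, C) uint8 array.
video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224): batch, frames, channels, H, W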
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _A : def __init__( self : Optional[Any] , _A : str , _A : int=13 , _A : Optional[int]=7 , _A : List[Any]=True , _A : Optional[int]=True , _A : str=True , _A : Any=99 , _A : str=32 , _A : Optional[Any]=5 , _A : str=4 , _A : Optional[Any]=37 , _A : List[str]="gelu" , _A : int=0.1 , _A : List[Any]=0.1 , _A : str=512 , _A : Dict=16 , _A : Union[str, Any]=2 , _A : Optional[Any]=0.02 , _A : List[Any]=3 , _A : Dict=4 , _A : Union[str, Any]=None , ) -> Tuple: """simple docstring""" lowercase : List[str] = parent lowercase : Union[str, Any] = batch_size lowercase : Dict = seq_length lowercase : Dict = is_training lowercase : Optional[int] = use_token_type_ids lowercase : Tuple = use_labels lowercase : List[str] = vocab_size lowercase : Tuple = hidden_size lowercase : List[str] = num_hidden_layers lowercase : Optional[Any] = num_attention_heads lowercase : str = intermediate_size lowercase : int = hidden_act lowercase : Optional[Any] = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : List[str] = max_position_embeddings lowercase : List[str] = type_vocab_size lowercase : List[str] = type_sequence_label_size lowercase : Any = initializer_range lowercase : Union[str, Any] = num_labels lowercase : int = num_choices lowercase : Union[str, Any] = scope lowercase : Any = self.vocab_size - 1 def __a ( self : List[str] ) -> List[str]: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Dict = None if self.use_token_type_ids: lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Optional[Any] = None lowercase : Optional[int] = None lowercase : List[str] = None if self.use_labels: lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[str] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowercase : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __a ( self : Optional[int] , _A : str , _A : Optional[int] , _A : Union[str, Any] , _A : int , *_A : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Tuple = OpenAIGPTModel(config=_A ) model.to(_A ) model.eval() lowercase : Dict = model(_A , token_type_ids=_A , head_mask=_A ) lowercase : Any = model(_A , token_type_ids=_A ) lowercase : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : List[Any] , _A : 
int , _A : Any , _A : List[Any] , _A : Union[str, Any] , *_A : Any ) -> List[str]: """simple docstring""" lowercase : Union[str, Any] = OpenAIGPTLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : List[Any] = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , *_A : List[str] ) -> str: """simple docstring""" lowercase : List[str] = OpenAIGPTDoubleHeadsModel(_A ) model.to(_A ) model.eval() lowercase : Optional[Any] = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : List[str] , _A : Optional[int] , _A : str , _A : List[Any] , _A : List[Any] , *_A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase : List[Any] = self.num_labels lowercase : Tuple = OpenAIGPTForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : Tuple ) -> List[str]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Optional[Any] = config_and_inputs lowercase : Any = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Optional[Any] = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _UpperCamelCase : List[Any] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _UpperCamelCase : Optional[Any] = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : Tuple , _A : Dict , _A : Tuple , _A : Dict , _A : Optional[Any] , _A : Any ) -> Optional[Any]: """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def __a ( self : Tuple , _A : List[str] , _A : List[str] , _A : Dict=False ) -> Optional[int]: """simple docstring""" lowercase : List[Any] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowercase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_A , ) lowercase : Union[str, Any] = inputs_dict['''labels'''] lowercase : Dict = inputs_dict['''labels'''] lowercase : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_A , ) lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def __a ( self : str ) -> Optional[Any]: """simple docstring""" lowercase : Tuple = OpenAIGPTModelTester(self ) lowercase : Any = ConfigTester(self , config_class=_A , n_embd=37 ) def __a ( self : List[str] ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Union[str, Any] ) -> str: """simple docstring""" lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_A ) def __a ( self : List[Any] ) -> str: """simple docstring""" lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_A ) def __a ( self : str ) -> Any: """simple docstring""" lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_A ) def __a ( self : Tuple ) -> List[str]: """simple docstring""" lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A ) @slow def __a ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str = OpenAIGPTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class _A ( unittest.TestCase ): @slow def __a ( self : List[str] ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(_A ) lowercase : List[Any] = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=_A ) # the president is lowercase : Dict = [ 481, 4_735, 544, 246, 963, 870, 762, 239, 244, 40_477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowercase : int = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].tolist() , _A )
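
# Usage sketch: the greedy-decoding check performed by the slow test above, as a
# standalone snippet. Requires network access to fetch the "openai-gpt" checkpoint;
# do_sample=False makes the output deterministic, matching the test's assertion.
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids  # ids [481, 4735, 544] per the test
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))  # 'the president is a very good man. " ...'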
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        # Write converted checkpoints to a throwaway directory.
        temp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=temp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class _A : _UpperCamelCase : torch.Tensor # [batch_size x 3] _UpperCamelCase : torch.Tensor # [batch_size x 3] _UpperCamelCase : torch.Tensor # [batch_size x 3] _UpperCamelCase : torch.Tensor # [batch_size x 3] _UpperCamelCase : int _UpperCamelCase : int _UpperCamelCase : float _UpperCamelCase : float _UpperCamelCase : Tuple[int] def __a ( self : Optional[Any] ) -> Dict: """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def __a ( self : Optional[Any] ) -> List[str]: """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def __a ( self : Optional[Any] ) -> int: """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def __a ( self : List[Any] ) -> torch.Tensor: """simple docstring""" lowercase : str = torch.arange(self.height * self.width ) lowercase : str = torch.stack( [ pixel_indices % self.width, torch.div(_A , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def __a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase , *lowercase : List[Any] = self.shape lowercase : Optional[int] = int(np.prod(_A ) ) lowercase : str = self.get_image_coords() lowercase : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) lowercase : Dict = self.get_camera_rays(_A ) lowercase : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def __a ( self : Any , _A : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase , *lowercase , lowercase : Any = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] lowercase : int = coords.view(_A , -1 , 2 ) lowercase : List[Any] = self.resolution() lowercase : int = self.fov() lowercase : int = (flat.float() / (res - 1)) * 2 - 1 lowercase : List[str] = fracs * torch.tan(fov / 2 ) lowercase : str = fracs.view(_A , -1 , 2 ) lowercase : Any = ( self.z.view(_A , 1 , 3 ) + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:] ) lowercase : List[str] = directions / directions.norm(dim=-1 , keepdim=_A ) lowercase : Optional[int] = torch.stack( [ torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_A , *_A , 2 , 3 ) def __a ( self : Optional[int] , _A : int , _A : int ) -> "DifferentiableProjectiveCamera": """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , ) def snake_case( __magic_name__ ) -> DifferentiableProjectiveCamera: '''simple docstring''' lowercase : Optional[int] = [] lowercase : Optional[Any] = [] lowercase : Any = [] lowercase : Any = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): lowercase : List[str] = np.array([np.sin(__magic_name__ ), np.cos(__magic_name__ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) lowercase : str = -z * 4 lowercase : Tuple = np.array([np.cos(__magic_name__ ), -np.sin(__magic_name__ ), 0.0] ) lowercase : Dict = np.cross(__magic_name__ , __magic_name__ ) origins.append(__magic_name__ ) xs.append(__magic_name__ ) ys.append(__magic_name__ ) zs.append(__magic_name__ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(__magic_name__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__magic_name__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__magic_name__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__magic_name__ , axis=0 ) ).float() , width=__magic_name__ , height=__magic_name__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__magic_name__ )) , )
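
# Usage sketch for the orbit helper above. The function and property names are
# mangled in this excerpt; diffusers ships the same code for shap-e, where the
# helper is `create_pan_cameras(size)` and the ray bundle is `camera_rays`.
# Treat that import path as an assumption.
from diffusers.pipelines.shap_e.camera import create_pan_cameras

cameras = create_pan_cameras(64)   # 20 views on a circle around the origin, 64x64 each
rays = cameras.camera_rays         # stacked (origin, direction) pairs per pixel
print(rays.shape)                  # torch.Size([1, 81920, 2, 3]) = (batch, 20*64*64, 2, 3)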
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    # Each element contributes two branches: exclude it, then include it,
    # so the recursion enumerates all 2**n subsequences.
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: take sequence[index], recurse, then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = StableDiffusionDiffEditPipeline _UpperCamelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} _UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} _UpperCamelCase : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _UpperCamelCase : str = frozenset([] ) def __a ( self : Optional[Any] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) lowercase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) lowercase : str = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) lowercase : str = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , ) torch.manual_seed(0 ) lowercase : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowercase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , ) lowercase : Optional[int] = CLIPTextModel(_A ) lowercase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase : Tuple = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __a ( self : str , _A : List[str] , _A : Optional[Any]=0 ) -> Optional[Any]: """simple docstring""" lowercase : Tuple = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A ) lowercase : Optional[int] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): lowercase : Tuple = torch.manual_seed(_A ) else: lowercase : Tuple = torch.Generator(device=_A ).manual_seed(_A ) lowercase : Optional[Any] = { '''prompt''': '''a dog and a newt''', 
'''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __a ( self : Tuple , _A : Optional[int] , _A : Dict=0 ) -> Union[str, Any]: """simple docstring""" lowercase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) lowercase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase : Optional[Any] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): lowercase : Tuple = torch.manual_seed(_A ) else: lowercase : Tuple = torch.Generator(device=_A ).manual_seed(_A ) lowercase : Any = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __a ( self : Optional[Any] , _A : str , _A : Union[str, Any]=0 ) -> Optional[int]: """simple docstring""" lowercase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) lowercase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): lowercase : str = torch.manual_seed(_A ) else: lowercase : int = torch.Generator(device=_A ).manual_seed(_A ) lowercase : Dict = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def __a ( self : List[Any] ) -> str: """simple docstring""" if not hasattr(self.pipeline_class , '''_optional_components''' ): return lowercase : Optional[Any] = self.get_dummy_components() lowercase : List[str] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_A , _A , _A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowercase : Optional[int] = self.get_dummy_inputs(_A ) lowercase : Tuple = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) lowercase : List[Any] = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowercase : Dict = self.get_dummy_inputs(_A ) lowercase : Optional[Any] = pipe_loaded(**_A )[0] lowercase : List[Any] = np.abs(output - output_loaded ).max() self.assertLess(_A , 1E-4 ) def __a ( self : List[str] ) -> int: """simple docstring""" lowercase : int = '''cpu''' lowercase : Optional[int] = self.get_dummy_components() lowercase : Union[str, Any] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) lowercase : Any = self.get_dummy_mask_inputs(_A ) lowercase : Dict = pipe.generate_mask(**_A ) lowercase : str = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowercase : Any = np.array([0] * 9 ) lowercase : int = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1E-3 ) 
self.assertEqual(mask[0, -3, -4] , 0 ) def __a ( self : Optional[int] ) -> str: """simple docstring""" lowercase : Dict = '''cpu''' lowercase : Optional[Any] = self.get_dummy_components() lowercase : str = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) lowercase : Optional[int] = self.get_dummy_inversion_inputs(_A ) lowercase : int = pipe.invert(**_A ).images lowercase : Optional[int] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase : Any = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowercase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1E-3 ) def __a ( self : Any ) -> List[str]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def __a ( self : Dict ) -> Dict: """simple docstring""" lowercase : int = '''cpu''' lowercase : Union[str, Any] = self.get_dummy_components() lowercase : Optional[int] = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} lowercase : List[Any] = DPMSolverMultistepScheduler(**_A ) lowercase : Union[str, Any] = DPMSolverMultistepInverseScheduler(**_A ) lowercase : int = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) lowercase : List[str] = self.get_dummy_inversion_inputs(_A ) lowercase : str = pipe.invert(**_A ).images lowercase : Any = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase : List[Any] = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowercase : int = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1E-3 ) @require_torch_gpu @slow class _A ( unittest.TestCase ): def __a ( self : Optional[int] ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def __a ( cls : int ) -> Any: """simple docstring""" lowercase : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) lowercase : Union[str, Any] = raw_image.convert('''RGB''' ).resize((768, 768) ) lowercase : int = raw_image def __a ( self : Dict ) -> Optional[int]: """simple docstring""" lowercase : str = torch.manual_seed(0 ) lowercase : Tuple = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) lowercase : int = DDIMScheduler.from_config(pipe.scheduler.config ) lowercase : Tuple = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) lowercase : Optional[int] = '''a bowl of fruit''' lowercase : List[str] = '''a bowl of pears''' lowercase : Tuple = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) lowercase : Optional[Any] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents lowercase : Union[str, Any] = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] lowercase : Tuple = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def __a ( self : List[str] ) -> Tuple: 
"""simple docstring""" lowercase : Dict = torch.manual_seed(0 ) lowercase : List[str] = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) lowercase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase : Optional[int] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) lowercase : Dict = '''a bowl of fruit''' lowercase : List[Any] = '''a bowl of pears''' lowercase : Optional[int] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) lowercase : Union[str, Any] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents lowercase : Any = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] lowercase : str = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): _UpperCamelCase : Dict = ['''input_features'''] def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int: """simple docstring""" super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , ) lowercase : Optional[Any] = n_fft lowercase : Optional[int] = hop_length lowercase : Optional[int] = chunk_length lowercase : Union[str, Any] = chunk_length * sampling_rate lowercase : Optional[Any] = self.n_samples // hop_length lowercase : Optional[Any] = sampling_rate lowercase : Union[str, Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ) def __a ( self : Dict , _A : np.array ) -> np.ndarray: """simple docstring""" lowercase : List[str] = spectrogram( _A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) lowercase : Union[str, Any] = log_spec[:, :-1] lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 ) lowercase : str = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]: """simple docstring""" if attention_mask is not None: lowercase : Optional[Any] = np.array(_A , np.intaa ) lowercase : List[str] = [] for vector, length in zip(_A , attention_mask.sum(-1 ) ): lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowercase : int = padding_value normed_input_values.append(_A ) else: lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowercase : Optional[Any] = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): lowercase : List[Any] = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase : List[str] = [np.asarray([raw_speech] ).T] lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding lowercase : str = self.pad( _A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowercase : Tuple = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]] if isinstance(input_features[0] , _A ): lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features] else: lowercase : Optional[int] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: lowercase : Any = padded_inputs.convert_to_tensors(_A ) return padded_inputs def __a ( self : Optional[Any] ) -> Dict[str, Any]: """simple docstring""" lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Dict = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
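
# Usage sketch for the extractor above on a synthetic one-second clip. The class
# name is stripped from this excerpt; WhisperFeatureExtractor is an assumption
# based on the 30-second chunking and the log-mel pipeline it implements.
import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()
audio = np.random.randn(16_000).astype(np.float32)  # 1 s of noise at 16 kHz
features = extractor(audio, sampling_rate=16_000, return_tensors="np")
# Padded to the full 30 s window: 80 mel bins x 3000 frames.
print(features["input_features"].shape)  # (1, 80, 3000)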
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Optional[int] = CanineTokenizer _UpperCamelCase : Any = False def __a ( self : List[str] ) -> Any: """simple docstring""" super().setUp() lowercase : List[str] = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self : Tuple ) -> int: """simple docstring""" return CanineTokenizer.from_pretrained('''google/canine-s''' ) def __a ( self : Union[str, Any] , **_A : Tuple ) -> CanineTokenizer: """simple docstring""" lowercase : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) lowercase : List[Any] = 1_024 return tokenizer @require_torch def __a ( self : List[str] ) -> Any: """simple docstring""" lowercase : Union[str, Any] = self.canine_tokenizer lowercase : List[str] = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.'''] # fmt: off lowercase : Union[str, Any] = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0] # fmt: on lowercase : Any = tokenizer(_A , padding=_A , return_tensors='''pt''' ) self.assertIsInstance(_A , _A ) lowercase : Optional[int] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def __a ( self : str ) -> Tuple: """simple docstring""" lowercase : Optional[Any] = self.canine_tokenizer lowercase : Any = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.'''] lowercase : Dict = tokenizer(_A , padding=_A , return_tensors='''pt''' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('''input_ids''' , _A ) self.assertIn('''attention_mask''' , _A ) self.assertIn('''token_type_ids''' , _A ) @require_torch def __a ( self : Any ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.canine_tokenizer lowercase : List[Any] = [ '''What\'s the weater?''', '''It\'s about 25 degrees.''', ] lowercase : Dict = tokenizer( text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def __a ( self : Any ) -> str: """simple docstring""" lowercase : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowercase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc lowercase : List[str] = tempfile.mkdtemp() lowercase : Optional[int] = ''' He is very happy, UNwant\u00E9d,running''' lowercase : List[Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) lowercase : Dict = tokenizer.__class__.from_pretrained(_A ) lowercase : Any = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) 
lowercase : Dict = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc lowercase : List[str] = tempfile.mkdtemp() lowercase : Any = ''' He is very happy, UNwant\u00E9d,running''' lowercase : Optional[Any] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: lowercase : int = chr(0XE_007 ) additional_special_tokens.append(_A ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) lowercase : Any = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) lowercase : Any = tokenizer.__class__.from_pretrained(_A ) lowercase : Any = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn(_A , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowercase : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __a ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): lowercase , lowercase : Tuple = self.get_clean_sequence(_A ) # a special token for Canine can be defined as follows: lowercase : int = 0XE_005 lowercase : Optional[Any] = chr(_A ) tokenizer.add_special_tokens({'''cls_token''': special_token} ) lowercase : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) self.assertEqual(len(_A ) , 1 ) lowercase : Optional[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_A ) lowercase : str = tokenizer.encode(_A , add_special_tokens=_A ) lowercase : int = tokenizer.encode(_A , add_special_tokens=_A ) lowercase : List[Any] = tokenizer.encode(_A , add_special_tokens=_A ) self.assertEqual(_A , input_encoded + special_token_id ) lowercase : int = tokenizer.decode(_A , skip_special_tokens=_A ) self.assertTrue(special_token not in decoded ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : Tuple = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): lowercase : Any = chr(0XE_005 ) lowercase : str = chr(0XE_006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_A ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} ) lowercase : Tuple = tokenizer.tokenize(_A ) lowercase : Any = tokenizer.tokenize(_A ) self.assertEqual(len(_A ) , 1 ) self.assertEqual(len(_A ) , 1 ) self.assertEqual(token_a[0] , _A ) self.assertEqual(token_a[0] , _A ) @require_tokenizers def __a ( self : Any ) -> Any: """simple docstring""" lowercase : Union[str, Any] = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # a special token for Canine can be defined as follows: lowercase : Any = 0XE_006 lowercase : Optional[Any] = chr(_A ) lowercase : List[Any] = AddedToken(_A , lstrip=_A ) tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(_A ) tokenizer.from_pretrained(_A ) def __a ( self : Optional[Any] ) -> List[str]: """simple docstring""" lowercase : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: lowercase : Dict = json.load(_A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: lowercase : str = json.load(_A ) # a special token for Canine can be defined as follows: lowercase : Union[str, Any] = 0XE_006 lowercase : Union[str, Any] = chr(_A ) lowercase : List[str] = [new_token_a] lowercase : Optional[int] = [new_token_a] with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase : List[str] = tokenizer_class.from_pretrained(_A , extra_ids=0 ) self.assertIn(_A , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) lowercase : str = 0XE_007 lowercase : List[str] = chr(_A ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase : str = [AddedToken(_A , lstrip=_A )] lowercase : List[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , extra_ids=0 ) self.assertIn(_A , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def __a ( self : Optional[int] ) -> int: """simple docstring""" lowercase : Optional[Any] = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): lowercase : Optional[Any] = '''hello world''' if self.space_between_special_tokens: lowercase : Tuple = '''[CLS] hello world [SEP]''' else: lowercase : Dict = input lowercase : int = tokenizer.encode(_A , add_special_tokens=_A ) lowercase : Union[str, Any] = tokenizer.decode(_A , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(_A , [output, output.lower()] ) def __a ( self : str ) -> List[str]: """simple docstring""" lowercase : int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): lowercase : Tuple = [ '''bos_token''', '''eos_token''', '''unk_token''', '''sep_token''', '''pad_token''', '''cls_token''', '''mask_token''', ] lowercase : Optional[int] = '''a''' lowercase : Optional[Any] = ord(_A ) for attr in attributes_list: setattr(_A , attr + '''_id''' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '''_id''' ) , _A ) setattr(_A , attr + '''_id''' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '''_id''' ) , _A ) setattr(_A , '''additional_special_tokens_ids''' , [] ) self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] ) self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] ) lowercase : str = 0XE_006 lowercase : str = chr(_A ) setattr(_A , '''additional_special_tokens_ids''' , [additional_special_token_id] ) self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [additional_special_token] ) self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [additional_special_token_id] ) def __a ( self : List[str] ) -> Any: """simple docstring""" pass def __a ( self : Union[str, Any] ) -> Dict: """simple docstring""" pass def __a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" pass def __a ( self : Any ) -> Dict: """simple docstring""" pass def __a ( self : Optional[Any] ) -> Tuple: """simple docstring""" pass def __a ( self : Any ) -> int: """simple docstring""" pass def __a ( self : Dict ) -> Dict: """simple docstring""" pass def __a ( self : List[str] ) -> 
Optional[Any]: """simple docstring""" pass
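
# Usage sketch of the character-level encoding the tests above exercise: CANINE
# tokenizes each Unicode code point directly, with private-use code points
# 0xE000 and 0xE001 serving as [CLS] and [SEP].
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
enc = tokenizer("Life is like a box of chocolates.")
print(enc["input_ids"][:5])  # [57344, 76, 105, 102, 101] -> [CLS], 'L', 'i', 'f', 'e'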
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any: """simple docstring""" lowercase : str = parent lowercase : Optional[Any] = batch_size lowercase : Union[str, Any] = seq_length lowercase : str = is_training lowercase : str = use_input_lengths lowercase : List[Any] = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Dict = sinusoidal_embeddings lowercase : Any = causal lowercase : str = asm lowercase : Optional[Any] = n_langs lowercase : Dict = vocab_size lowercase : Dict = n_special lowercase : List[Any] = hidden_size lowercase : str = num_hidden_layers lowercase : int = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : Optional[int] = type_sequence_label_size lowercase : List[str] = initializer_range lowercase : List[str] = num_labels lowercase : int = num_choices lowercase : int = summary_type lowercase : Tuple = use_proj lowercase : Union[str, Any] = scope lowercase : List[str] = bos_token_id def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : str = None if self.use_input_lengths: lowercase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Union[str, Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : Union[str, Any] = None lowercase : List[str] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self : Any ) -> List[Any]: 
"""simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]: """simple docstring""" lowercase : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , lengths=_A , langs=_A ) lowercase : Dict = model(_A , langs=_A ) lowercase : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Any = model(_A , start_positions=_A , end_positions=_A ) lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict: """simple docstring""" lowercase : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) lowercase : Tuple = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) lowercase : Optional[int] = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A ) ((lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int: """simple docstring""" lowercase : List[str] = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict: """simple docstring""" lowercase : Optional[Any] = self.num_labels lowercase : Tuple = XLMForTokenClassification(_A ) model.to(_A ) model.eval() lowercase : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]: """simple docstring""" lowercase : int = self.num_choices lowercase : List[Any] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCamelCase : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCamelCase : Tuple = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the 
            # slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
        """simple docstring"""
        lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                lowercase : int = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_A )
                lowercase : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_A )
        return inputs_dict

    def __a ( self : Any ) -> List[str]:
        """simple docstring"""
        lowercase : List[str] = XLMModelTester(self )
        lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )

    def __a ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def __a ( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*_A )

    def __a ( self : Any ) -> Dict:
        """simple docstring"""
        lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*_A )

    def __a ( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*_A )

    def __a ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*_A )

    def __a ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*_A )

    def __a ( self : Dict ) -> int:
        """simple docstring"""
        lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*_A )

    def __a ( self : Any ) -> List[Any]:
        """simple docstring"""
        lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )

    def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
        """simple docstring"""
        self.assertIsInstance(_A , _A )
        self.assertListEqual(
            [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
        self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_attentions in enumerate(_A ):
            # adds PAD dummy token
            lowercase : List[Any] = min_length + idx + 1
            lowercase : str = min_length + idx + 1
            lowercase : Any = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )

    def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
        """simple docstring"""
        self.assertIsInstance(_A , _A )
        self.assertListEqual(
            [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
        self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_hidden_states in enumerate(_A ):
            # adds PAD dummy token
            lowercase : Union[str, Any] = min_length + idx + 1
            lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )

        pass

    @slow
    def __a ( self : Optional[int] ) -> Any:
        """simple docstring"""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase : Any = XLMModel.from_pretrained(_A )
            self.assertIsNotNone(_A )


@require_torch
class _A ( unittest.TestCase ):
    @slow
    def __a ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
        model.to(_A )
        lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A )  # the president
        lowercase : List[str] = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        lowercase : Dict = model.generate(_A , do_sample=_A )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}


class _A ( _lowerCamelCase ):
    _UpperCamelCase : int = '''mgp-str'''

    def __init__( self : Optional[int] , _A : Optional[Any]=[32, 128] , _A : Dict=4 , _A : Tuple=3 , _A : int=27 , _A : List[Any]=38 , _A : Any=50_257 , _A : Dict=30_522 , _A : str=768 , _A : Union[str, Any]=12 , _A : Dict=12 , _A : str=4.0 , _A : str=True , _A : Optional[int]=False , _A : Optional[int]=1E-5 , _A : Union[str, Any]=0.0 , _A : Any=0.0 , _A : Dict=0.0 , _A : List[Any]=False , _A : str=0.02 , **_A : Optional[Any] , ) -> Dict:
        """simple docstring"""
        super().__init__(**_A )

        lowercase : Optional[Any] = image_size
        lowercase : List[Any] = patch_size
        lowercase : Optional[int] = num_channels
        lowercase : Dict = max_token_length
        lowercase : str = num_character_labels
        lowercase : int = num_bpe_labels
        lowercase : str = num_wordpiece_labels
        lowercase : List[Any] = hidden_size
        lowercase : List[Any] = num_hidden_layers
        lowercase : str = num_attention_heads
        lowercase : List[str] = mlp_ratio
        lowercase : Optional[Any] = distilled
        lowercase : Tuple = layer_norm_eps
        lowercase : int = drop_rate
        lowercase : List[Any] = qkv_bias
        lowercase : Any = attn_drop_rate
        lowercase : Optional[int] = drop_path_rate
        lowercase : List[Any] = output_aa_attentions
        lowercase : int = initializer_range
def solution(length: int = 50) -> int:
    '''
    Count the ways a row of `length` units can be filled with black unit
    squares and coloured tiles of length two, three or four.
    '''
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
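# Sanity check (a minimal sketch, not part of the original solution): the count
# above equals the number of compositions of `length` into parts {1, 2, 3, 4},
# which obeys a tetranacci-style recurrence. `_compositions_1_to_4` is a
# hypothetical helper used only for this cross-check.
def _compositions_1_to_4(length: int) -> int:
    # f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4), with f(0) = 1
    ways = [1] + [0] * length
    for n in range(1, length + 1):
        ways[n] = sum(ways[n - part] for part in range(1, 5) if part <= n)
    return ways[length]


if __name__ == "__main__":
    # Both counts agree for small rows: 1, 1, 2, 4, 8, 15, ...
    assert all(solution(n) == _compositions_1_to_4(n) for n in range(12))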
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


lowerCAmelCase_ = {
    'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoForCausalLM',
        'GPTNeoForQuestionAnswering',
        'GPTNeoForSequenceClassification',
        'GPTNeoForTokenClassification',
        'GPTNeoModel',
        'GPTNeoPreTrainedModel',
        'load_tf_weights_in_gpt_neo',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        'FlaxGPTNeoForCausalLM',
        'FlaxGPTNeoModel',
        'FlaxGPTNeoPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
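# The block above uses transformers' `_LazyModule` so that heavy submodules are
# only imported on first attribute access. A minimal sketch of the same idea
# with PEP 562 module-level `__getattr__` (an illustrative assumption, not the
# actual `_LazyModule` implementation):
import importlib

_lazy_attrs = {'GPTNeoConfig': 'configuration_gpt_neo'}  # attribute -> submodule


def __getattr__(name):
    # Resolve the submodule lazily, so importing the package stays cheap.
    if name in _lazy_attrs:
        module = importlib.import_module(f'.{_lazy_attrs[name]}', __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')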
import os


def solution(filename: str = "input.txt") -> int:
    '''
    Return the minimal path sum through the matrix stored in `filename`, moving
    only right, up and down, from any cell in the left column to any cell in
    the right column.
    '''
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )] for line in input_file.readlines()
        ]

    rows = len(matrix )
    cols = len(matrix[0] )

    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1 , cols ):
        # initialise with the moves coming from the left...
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # ...then relax with the moves coming from above...
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )

        # ...and from below
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )


if __name__ == "__main__":
    print(f'''{solution() = }''')
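# Sanity check (a minimal in-memory sketch of the same three-pass relaxation,
# so it can run without `input.txt`; the 5x5 matrix and its minimal path sum of
# 994 are the example from the Project Euler problem statement):
def _solution_for_matrix(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [row[0] for row in matrix]  # best sums for the current column
    for j in range(1, cols):
        sums = [sums[i] + matrix[i][j] for i in range(rows)]  # moves from the left
        for i in range(1, rows):  # moves from above
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # moves from below
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    return min(sums)


if __name__ == "__main__":
    _example = [
        [131, 673, 234, 103, 18],
        [201, 96, 342, 965, 150],
        [630, 803, 746, 422, 111],
        [537, 699, 497, 121, 956],
        [805, 732, 524, 37, 331],
    ]
    assert _solution_for_matrix(_example) == 994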
import argparse
import re

import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


lowerCAmelCase_ = {
    'text_branch': 'text_model',
    'audio_branch': 'audio_model.audio_encoder',
    'attn': 'attention.self',
    'self.proj': 'output.dense',
    'attention.self_mask': 'attn_mask',
    'mlp.fc1': 'intermediate.dense',
    'mlp.fc2': 'output.dense',
    'norm1': 'layernorm_before',
    'norm2': 'layernorm_after',
    'bn0': 'batch_norm',
}

lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')


def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
    '''simple docstring'''
    lowercase , lowercase : Dict = create_model(
        '''HTSAT-tiny''' ,
        '''roberta''' ,
        __magic_name__ ,
        precision='''fp32''' ,
        device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' ,
        enable_fusion=__magic_name__ ,
        fusion_type='''aff_2d''' if enable_fusion else None ,
    )
    return model, model_cfg


def snake_case( __magic_name__ ) -> Tuple:
    '''simple docstring'''
    lowercase : Dict = {}
    lowercase : Optional[int] = r'''.*sequential.(\d+).*'''
    lowercase : Tuple = r'''.*_projection.(\d+).*'''

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                lowercase : str = key.replace(__magic_name__ , __magic_name__ )

        if re.match(__magic_name__ , __magic_name__ ):
            # replace sequential layers with list
            lowercase : Any = re.match(__magic_name__ , __magic_name__ ).group(1 )
            lowercase : Optional[Any] = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__magic_name__ )//3}.linear.""" )
        elif re.match(__magic_name__ , __magic_name__ ):
            lowercase : str = int(re.match(__magic_name__ , __magic_name__ ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            lowercase : int = 1 if projecton_layer == 0 else 2
            lowercase : Optional[int] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            lowercase : Optional[Any] = value
            lowercase : List[Any] = mixed_qkv.size(0 ) // 3
            lowercase : Optional[int] = mixed_qkv[:qkv_dim]
            lowercase : str = mixed_qkv[qkv_dim : qkv_dim * 2]
            lowercase : Union[str, Any] = mixed_qkv[qkv_dim * 2 :]

            lowercase : Union[str, Any] = query_layer
            lowercase : Dict = key_layer
            lowercase : Tuple = value_layer
        else:
            lowercase : Tuple = value

    return model_state_dict


def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ) -> Dict:
    '''simple docstring'''
    lowercase , lowercase : List[Any] = init_clap(__magic_name__ , enable_fusion=__magic_name__ )
    clap_model.eval()

    lowercase : Any = clap_model.state_dict()
    lowercase : int = rename_state_dict(__magic_name__ )

    lowercase : Dict = ClapConfig()
    lowercase : Tuple = enable_fusion
    lowercase : List[Any] = ClapModel(__magic_name__ )

    # ignore the spectrogram embedding layer
    model.load_state_dict(__magic_name__ , strict=__magic_name__ )

    model.save_pretrained(__magic_name__ )
    transformers_config.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    lowerCAmelCase_ = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
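# The qkv branch above slices a fused attention projection into equal thirds
# along dim 0. A minimal standalone sketch of that invariant (the shapes are
# illustrative, not taken from a real CLAP checkpoint):
import torch

mixed_qkv = torch.randn(3 * 4, 8)  # fused (3 * head_dim, hidden) projection
qkv_dim = mixed_qkv.size(0) // 3
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
# torch.chunk yields the same three slices in one call
assert all(torch.equal(a, b) for a, b in zip((query, key, value), torch.chunk(mixed_qkv, 3, dim=0)))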
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    @slow
    def __a ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
        lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )

        lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
        lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids

        lowercase : List[Any] = model(_A , labels=_A ).loss
        lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()

        lowercase : Union[str, Any] = -21.228_168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


lowerCAmelCase_ = get_logger(__name__)


class _A :
    _UpperCamelCase : int = '''dummy_data'''
    _UpperCamelCase : Tuple = '''datasets'''
    _UpperCamelCase : Optional[int] = False

    def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
        """simple docstring"""
        lowercase : Tuple = 0
        lowercase : List[Any] = dataset_name
        lowercase : int = cache_dir
        lowercase : str = use_local_dummy_data
        lowercase : Union[str, Any] = config
        # download_callbacks take a single url as input
        lowercase : List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowercase : Any = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowercase : Union[str, Any] = str(_A )
        # to be downloaded
        lowercase : Tuple = None
        lowercase : Optional[int] = None

    @property
    def __a ( self : str ) -> Dict:
        """simple docstring"""
        if self._dummy_file is None:
            lowercase : Optional[Any] = self.download_dummy_data()
        return self._dummy_file

    @property
    def __a ( self : int ) -> Optional[Any]:
        """simple docstring"""
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('''dummy''' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('''dummy''' , self.version_name )

    @property
    def __a ( self : List[Any] ) -> int:
        """simple docstring"""
        return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )

    def __a ( self : str ) -> int:
        """simple docstring"""
        lowercase : str = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowercase : List[str] = cached_path(
            _A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
        return os.path.join(_A , self.dummy_file_name )

    @property
    def __a ( self : str ) -> Tuple:
        """simple docstring"""
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def __a ( self : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        if self._bucket_url is None:
            lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
        return self._bucket_url

    @property
    def __a ( self : Tuple ) -> List[str]:
        """simple docstring"""
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )

    def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowercase : Union[str, Any] = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowercase : Optional[Any] = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(_A , _A ):
            return self.create_dummy_data_dict(_A , _A )
        elif isinstance(_A , (list, tuple) ):
            return self.create_dummy_data_list(_A , _A )
        else:
            return self.create_dummy_data_single(_A , _A )

    def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
        """simple docstring"""
        return self.download_and_extract(_A )

    def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
        """simple docstring"""
        return self.download_and_extract(_A )

    def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
        """simple docstring"""
        return path

    def __a ( self : List[str] ) -> str:
        """simple docstring"""
        return {}

    def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        lowercase : Any = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(_A , _A ):
                    for single_url in single_urls:
                        download_callback(_A )
                else:
                    lowercase : List[str] = single_urls
                    download_callback(_A )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(_A , _A ):
                lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
            else:
                lowercase : int = single_urls
                lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
            lowercase : str = value

        # make sure that values are unique
        if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
        """simple docstring"""
        lowercase : Optional[Any] = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
        lowercase : str = all(
            url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            lowercase : List[str] = [data_url[0]] * len(_A )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(_A )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
            dummy_data_list.append(_A )
        return dummy_data_list

    def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        for download_callback in self.download_callbacks:
            download_callback(_A )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
        if os.path.exists(_A ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def __a ( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        pass

    def __a ( self : Any ) -> Dict:
        """simple docstring"""
        pass

    def __a ( self : int , _A : Optional[Any] ) -> Dict:
        """simple docstring"""

        def _iter_archive_members(_A : Optional[int] ):
            # this preserves the order of the members inside the ZIP archive
            lowercase : int = Path(self.dummy_file ).parent
            lowercase : List[str] = path.relative_to(_A )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowercase : Optional[int] = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(_A )

        lowercase : Tuple = Path(_A )
        lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
                yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )

    def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
        """simple docstring"""
        if not isinstance(_A , _A ):
            lowercase : Dict = [paths]
        for path in paths:
            if os.path.isfile(_A ):
                if os.path.basename(_A ).startswith(('''.''', '''__''') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(_A ):
                    if os.path.basename(_A ).startswith(('''.''', '''__''') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(_A ):
                        if filename.startswith(('''.''', '''__''') ):
                            continue
                        yield os.path.join(_A , _A )
from heapq import heappop, heappush

import numpy as np


def dijkstra(grid: np.ndarray, source: tuple[int, int], destination: tuple[int, int], allow_diagonal: bool, ) -> tuple[float | int, list[tuple[int, int]]]:
    '''
    Return the shortest distance (in unit steps) from `source` to `destination`
    in a binary grid where cells equal to 1 are traversable, together with the
    path taken.
    '''
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
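# A minimal usage sketch (assumed inputs, not from the original module): on a
# 3x3 grid of traversable cells the 4-connected corner-to-corner distance is 4,
# and 2 once diagonal moves are allowed.
if __name__ == "__main__":
    _grid = np.ones((3, 3), dtype=int)
    _dist, _path = dijkstra(_grid, (0, 0), (2, 2), allow_diagonal=False)
    assert _dist == 4 and _path[0] == (0, 0) and _path[-1] == (2, 2)
    _dist, _ = dijkstra(_grid, (0, 0), (2, 2), allow_diagonal=True)
    assert _dist == 2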
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader

from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class _A ( _lowerCamelCase ):
    def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]:
        """simple docstring"""
        lowercase : Union[str, Any] = tokenizer
        lowercase : List[Any] = tokenizer.bos_token_id
        lowercase : Union[str, Any] = dataset
        lowercase : Union[str, Any] = seq_length
        lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences

    def __iter__( self : int ) -> int:
        """simple docstring"""
        lowercase : Dict = iter(self.dataset )
        lowercase : Union[str, Any] = True
        while more_examples:
            lowercase , lowercase : Tuple = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(_A )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    lowercase : List[str] = False
                    break
            lowercase : str = tokenizer(_A , truncation=_A )['''input_ids''']
            lowercase : List[str] = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(_A ) , self.seq_length ):
                lowercase : int = all_token_ids[i : i + self.seq_length]
                if len(_A ) == self.seq_length:
                    yield torch.tensor(_A )


def snake_case( __magic_name__ ) -> Optional[Any]:
    '''simple docstring'''
    lowercase : List[str] = {'''streaming''': True}
    lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ )
    lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length )
    lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size )
    return eval_dataloader


def snake_case( __magic_name__ ) -> str:
    '''simple docstring'''
    model.eval()
    lowercase : str = []
    for step, batch in enumerate(__magic_name__ ):
        with torch.no_grad():
            lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
        lowercase : List[Any] = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(__magic_name__ ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) )
    try:
        lowercase : Tuple = torch.exp(__magic_name__ )
    except OverflowError:
        lowercase : List[str] = float('''inf''' )
    return loss.item(), perplexity.item()


# Setup Accelerator
lowerCAmelCase_ = Accelerator()

# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)

# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)

# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
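# Note on `evaluate` above: the reported perplexity is just the exponential of
# the mean token-level cross-entropy loss. A tiny self-contained sketch of that
# relationship (the loss values are made-up placeholders):
import torch

_losses = torch.tensor([2.1, 2.3, 2.0])
_perplexity = torch.exp(_losses.mean())
print(f'perplexity = {_perplexity.item():.2f}')  # exp(2.1333...) ~= 8.44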
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowerCAmelCase_ = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['Mask2FormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_maskaformer import MaskaFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskaformer import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskaFormerForUniversalSegmentation,
            MaskaFormerModel,
            MaskaFormerPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class _A ( _lowerCamelCase ):
    def __init__( self : Any , _A : List[Any] , _A : List[Any] , _A : List[Any] ) -> int:
        """simple docstring"""
        lowercase : Any = dataset
        lowercase : Optional[Any] = process
        lowercase : Optional[int] = params

    def __len__( self : Any ) -> int:
        """simple docstring"""
        return len(self.dataset )

    def __getitem__( self : List[Any] , _A : List[Any] ) -> Any:
        """simple docstring"""
        lowercase : List[str] = self.dataset[i]
        lowercase : int = self.process(_A , **self.params )
        return processed


class _A ( _lowerCamelCase ):
    def __init__( self : int , _A : List[str] , _A : Optional[int] , _A : str , _A : Union[str, Any]=None ) -> Optional[Any]:
        """simple docstring"""
        lowercase : str = loader
        lowercase : Optional[int] = infer
        lowercase : Dict = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            lowercase : List[Any] = None
        lowercase : Any = loader_batch_size

        # Internal bookkeeping
        lowercase : List[str] = None
        lowercase : Any = None

    def __len__( self : Dict ) -> Optional[int]:
        """simple docstring"""
        return len(self.loader )

    def __iter__( self : Union[str, Any] ) -> int:
        """simple docstring"""
        lowercase : Tuple = iter(self.loader )
        return self

    def __a ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            lowercase : str = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            lowercase : Any = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(_A , _A ):
                    # Convert ModelOutput to tuple first
                    lowercase : Optional[int] = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        lowercase : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowercase : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        lowercase : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowercase : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    lowercase : Any = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    lowercase : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    lowercase : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    lowercase : List[str] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            lowercase : str = self._loader_batch_data.__class__(_A )
        self._loader_batch_index += 1
        return result

    def __a ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        lowercase : Any = next(self.iterator )
        lowercase : Union[str, Any] = self.infer(_A , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(_A , torch.Tensor ):
                lowercase : str = processed
            else:
                lowercase : Optional[int] = list(processed.keys() )[0]
                lowercase : int = processed[key]
            if isinstance(_A , _A ):
                lowercase : Optional[int] = len(_A )
            else:
                lowercase : str = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                lowercase : str = observed_batch_size
            # Setting internal index to unwrap the batch
            lowercase : int = processed
            lowercase : Optional[int] = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class _A ( _lowerCamelCase ):
    def __init__( self : List[str] , _A : Dict , _A : int , _A : List[str] , _A : Tuple=None ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(_A , _A , _A )

    def __iter__( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        lowercase : Union[str, Any] = iter(self.loader )
        lowercase : str = None
        return self

    def __a ( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        if self.subiterator is None:
            lowercase : List[str] = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            lowercase : Union[str, Any] = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            lowercase : List[Any] = self.infer(next(self.iterator ) , **self.params )
            lowercase : Dict = next(self.subiterator )
        return processed


class _A ( _lowerCamelCase ):
    def __iter__( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        lowercase : Optional[Any] = iter(self.loader )
        return self

    def __a ( self : int ) -> int:
        """simple docstring"""
        lowercase : Dict = False
        lowercase : Union[str, Any] = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                lowercase : Any = self.loader_batch_item()
                lowercase : List[Any] = item.pop('''is_last''' )
                accumulator.append(_A )
                if is_last:
                    return accumulator

        while not is_last:
            lowercase : int = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(_A , torch.Tensor ):
                    lowercase : Optional[int] = processed
                else:
                    lowercase : Any = list(processed.keys() )[0]
                    lowercase : str = processed[key]
                if isinstance(_A , _A ):
                    lowercase : List[str] = len(_A )
                else:
                    lowercase : Tuple = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    lowercase : Any = observed_batch_size
                lowercase : Optional[Any] = processed
                lowercase : Any = 0
                while self._loader_batch_index < self.loader_batch_size:
                    lowercase : Any = self.loader_batch_item()
                    lowercase : int = item.pop('''is_last''' )
                    accumulator.append(_A )
                    if is_last:
                        return accumulator
            else:
                lowercase : List[Any] = processed
                lowercase : Union[str, Any] = item.pop('''is_last''' )
                accumulator.append(_A )
        return accumulator


class _A ( _lowerCamelCase ):
    def __init__( self : Dict , _A : Dataset , _A : str ) -> List[Any]:
        """simple docstring"""
        lowercase : Dict = dataset
        lowercase : List[Any] = key

    def __len__( self : Tuple ) -> Any:
        """simple docstring"""
        return len(self.dataset )

    def __getitem__( self : Dict , _A : str ) -> List[str]:
        """simple docstring"""
        return self.dataset[i][self.key]


class _A ( _lowerCamelCase ):
    def __init__( self : Any , _A : Dataset , _A : str , _A : str ) -> Dict:
        """simple docstring"""
        lowercase : Optional[Any] = dataset
        lowercase : Tuple = keya
        lowercase : List[Any] = keya

    def __len__( self : int ) -> Any:
        """simple docstring"""
        return len(self.dataset )

    def __getitem__( self : Tuple , _A : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
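# The iterator classes above unroll a model batch back into single items by
# slicing the batch dimension and re-adding it with `unsqueeze(0)`. A minimal
# standalone sketch of that invariant (the shapes are illustrative assumptions):
import torch

_batch = torch.randn(4, 7)      # batch_size=4, feature dim 7
_item = _batch[2].unsqueeze(0)  # one element, reshaped back to batch_size=1
assert _item.shape == (1, 7)
assert torch.equal(_item[0], _batch[2])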
def sum_of_digits(n: int) -> int:
    '''Return the sum of the digits of `n`, iteratively.'''
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    '''Return the sum of the digits of `n`, recursively.'''
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )


def sum_of_digits_compact(n: int) -> int:
    '''Return the sum of the digits of `n`, via string conversion.'''
    return sum(int(c ) for c in str(abs(n ) ) )


def benchmark() -> None:
    '''Benchmark the three implementations against each other.'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
        print(F"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
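# A few quick agreement checks (assumed sample values, not from the original
# module): all three implementations return the same digit sum, and the digit
# sum is congruent to the number itself modulo 9.
if __name__ == "__main__":
    for _n in (0, 7, 12345, 262144):
        assert sum_of_digits(_n) == sum_of_digits_recursion(_n) == sum_of_digits_compact(_n)
    assert sum_of_digits(12345) == 15
    assert sum_of_digits(262144) % 9 == 262144 % 9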
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1, ) -> complex:
    '''
    Find a root of `function` near `starting_point` via the (modified)
    Newton-Raphson iteration x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n).
    '''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(prev_guess )
        else:
            raise ZeroDivisionError('''Could not find root''' ) from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')

    # Find root of polynomial
    # Find fourth Root of 5
    print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 + 5j)}''')

    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
    )

    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
    )

    # Find root of cos(x)
    print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
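# An extra usage sketch (an assumed example, not from the original module):
# plain Newton iteration on x**2 - 4 starting from 3 converges to the root at 2
# well within the default precision.
if __name__ == "__main__":
    _root = newton_raphson("x**2 - 4", 3.0)
    assert abs(_root - 2) < 10**-8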
308
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )

    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(args )


if __name__ == "__main__":
    main()
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)

from ...test_tokenization_common import TokenizerTesterMixin


lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

lowerCAmelCase_ = 25_00_04
lowerCAmelCase_ = 25_00_20


@require_sentencepiece
@require_tokenizers
class _A ( _lowerCamelCase , unittest.TestCase ):
    _UpperCamelCase : Dict = MBartaaTokenizer
    _UpperCamelCase : List[Any] = MBartaaTokenizerFast
    _UpperCamelCase : Optional[int] = True
    _UpperCamelCase : str = True

    def __a ( self : int ) -> str:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowercase : Any = MBartaaTokenizer(_A , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_A )
        tokenizer.save_pretrained(self.tmpdirname )

    def __a ( self : str ) -> Tuple:
        """simple docstring"""
        lowercase : str = '''<s>'''
        lowercase : List[Any] = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )

    def __a ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        lowercase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(_A ) , 1_054 )

    def __a ( self : Any ) -> str:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )

    def __a ( self : List[str] ) -> str:
        """simple docstring"""
        lowercase : str = MBartaaTokenizer(_A , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_A )

        lowercase : str = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        lowercase : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            _A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
        lowercase : int = tokenizer.convert_tokens_to_ids(_A )
        self.assertListEqual(
            _A , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )

        lowercase : str = tokenizer.convert_ids_to_tokens(_A )
        self.assertListEqual(
            _A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )

    @slow
    def __a ( self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        # fmt: off
        lowercase : int = {
            '''input_ids''': [
                [250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2],  # noqa: E501
                [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  # noqa: E501
                [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  # noqa: E501
            ],
            '''attention_mask''': [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  # noqa: E501
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # noqa: E501
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # noqa: E501
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )

    def __a ( self : Any ) -> str:
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        lowercase : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                lowercase : str = self.tokenizer_class.from_pretrained(_A , **_A )

                lowercase : Tuple = tempfile.mkdtemp()
                lowercase : str = tokenizer_r.save_pretrained(_A )
                lowercase : Optional[int] = tokenizer_p.save_pretrained(_A )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowercase : Dict = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(_A , _A )

                # Checks everything loads correctly in the same way
                lowercase : Any = tokenizer_r.from_pretrained(_A )
                lowercase : Union[str, Any] = tokenizer_p.from_pretrained(_A )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(_A )

                # Save tokenizer rust, legacy_format=True
                lowercase : Union[str, Any] = tempfile.mkdtemp()
                lowercase : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A )
                lowercase : List[str] = tokenizer_p.save_pretrained(_A )

                # Checks it save with the same files
                self.assertSequenceEqual(_A , _A )

                # Checks everything loads correctly in the same way
                lowercase : List[str] = tokenizer_r.from_pretrained(_A )
                lowercase : List[str] = tokenizer_p.from_pretrained(_A )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )

                shutil.rmtree(_A )

                # Save tokenizer rust, legacy_format=False
                lowercase : Optional[Any] = tempfile.mkdtemp()
                lowercase : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A )
                lowercase : Any = tokenizer_p.save_pretrained(_A )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                lowercase : Tuple = tokenizer_r.from_pretrained(_A )
                lowercase : Any = tokenizer_p.from_pretrained(_A )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )

                shutil.rmtree(_A )


@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    _UpperCamelCase : Dict = '''facebook/mbart-large-50-one-to-many-mmt'''
    _UpperCamelCase : str = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    _UpperCamelCase : Dict = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    _UpperCamelCase : List[Any] = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]

    @classmethod
    def __a ( cls : int ) -> List[str]:
        """simple docstring"""
        lowercase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        lowercase : Tuple = 1
        return cls

    def __a ( self : Optional[int] ) -> str:
        """simple docstring"""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250_038 )

    def __a ( self : Optional[Any] ) -> str:
        """simple docstring"""
        lowercase : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _A )

    def __a ( self : int ) -> Tuple:
        """simple docstring"""
        self.assertIn(_A , self.tokenizer.all_special_ids )
        lowercase : Tuple = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        lowercase : Optional[Any] = self.tokenizer.decode(_A , skip_special_tokens=_A )
        lowercase : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
        self.assertEqual(_A , _A )
        self.assertNotIn(self.tokenizer.eos_token , _A )

    def __a ( self : str ) -> Optional[Any]:
        """simple docstring"""
        lowercase : int = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , _A )
        lowercase : Optional[int] = 10
        lowercase : Optional[Any] = self.tokenizer(_A , max_length=_A , truncation=_A ).input_ids[0]
        self.assertEqual(ids[0] , _A )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(_A ) , _A )

    def __a ( self : str ) -> str:
        """simple docstring"""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250_053, 250_001] )

    def __a ( self : Any ) -> str:
        """simple docstring"""
        lowercase : Dict = tempfile.mkdtemp()
        lowercase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(_A )
        lowercase : Optional[int] = MBartaaTokenizer.from_pretrained(_A )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A )

    @require_torch
    def __a ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        lowercase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='''pt''' )
        lowercase : str = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def __a ( self : Optional[int] ) -> int:
        """simple docstring"""
        lowercase : List[str] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        lowercase : int = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )

        self.assertIsInstance(_A , _A )

        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        lowercase : List[str] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , _A )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] )  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def __a ( self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        lowercase : Dict = self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors='''pt''' )
        lowercase : Dict = self.tokenizer(
            text_target=self.tgt_text , padding=_A , truncation=_A , max_length=10 , return_tensors='''pt''' )
        lowercase : Union[str, Any] = targets['''input_ids''']
        lowercase : Dict = shift_tokens_right(_A , self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def __a ( self : str ) -> Optional[Any]:
        """simple docstring"""
        lowercase : Dict = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )

        self.assertEqual(
            nested_simplify(_A ) , {
                # en_XX, A, test, EOS
                '''input_ids''': [[250_004, 62, 3_034, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 250_001,
            } , )
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]: '''simple docstring''' lowercase : List[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowercase : Optional[int] = '''''' else: lowercase : List[Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : Tuple = in_proj_weight[ : config.hidden_size, : ] lowercase : str = in_proj_bias[: config.hidden_size] lowercase : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : Dict = in_proj_bias[ config.hidden_size : 
config.hidden_size * 2 ] lowercase : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase : Optional[int] = in_proj_bias[-config.hidden_size :] def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : str = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : Any = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : List[Any] = dct.pop(__magic_name__ ) lowercase : Union[str, Any] = val def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = ViTMSNConfig() lowercase : str = 10_00 lowercase : List[str] = '''datasets/huggingface/label-files''' lowercase : List[str] = '''imagenet-1k-id2label.json''' lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) ) lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Any = idalabel lowercase : List[Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowercase : int = 3_84 lowercase : Optional[Any] = 15_36 lowercase : Tuple = 6 elif "l16" in checkpoint_url: lowercase : Union[str, Any] = 10_24 lowercase : List[str] = 40_96 lowercase : int = 24 lowercase : Union[str, Any] = 16 lowercase : Tuple = 0.1 elif "b4" in checkpoint_url: lowercase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowercase : Dict = 7 lowercase : List[Any] = 10_24 lowercase : str = 40_96 lowercase : int = 24 lowercase : Dict = 16 lowercase : Tuple = 0.1 lowercase : int = ViTMSNModel(__magic_name__ ) lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder'''] lowercase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(__magic_name__ ) lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ ) for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) lowercase : Dict = ViTImageProcessor( size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ ) lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) lowercase : int = model(**__magic_name__ ) lowercase : Optional[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowercase : List[str] 
= torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__magic_name__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
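# Hedged usage sketch (added for illustration, not part of the original script):
# once `convert_vit_msn_checkpoint` has written a folder via `save_pretrained`,
# the model reloads through the standard transformers API. The checkpoint name
# "facebook/vit-msn-small" is a published hub model used here only as an
# example; a local dump folder works the same way.
import requests
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNModel

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")
model.eval()

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # [batch, num_patches + 1, hidden_size]
print(hidden.shape)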
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def snake_case( ) -> Tuple: '''simple docstring''' lowercase : Union[str, Any] = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' lowercase : Optional[Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ).convert('''RGB''' ) return image def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Optional[Any] = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') ) # fmt: on return rename_keys def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' lowercase : int = dct.pop(__magic_name__ ) lowercase : Any = val def snake_case( __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' for i in 
range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowercase : List[str] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) lowercase : str = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict lowercase : Any = torch.cat((q_bias, torch.zeros_like(__magic_name__ , requires_grad=__magic_name__ ), v_bias) ) lowercase : Any = qkv_bias def snake_case( __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Any = 3_64 if '''coco''' in model_name else 2_24 lowercase : int = BlipaVisionConfig(image_size=__magic_name__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: lowercase : int = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__magic_name__ ).to_dict() elif "opt-6.7b" in model_name: lowercase : Optional[Any] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__magic_name__ ).to_dict() elif "t5-xl" in model_name: lowercase : int = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowercase : Optional[Any] = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() lowercase : List[str] = BlipaConfig(vision_config=__magic_name__ , text_config=__magic_name__ ) return config, image_size @torch.no_grad() def snake_case( __magic_name__ , __magic_name__=None , __magic_name__=False ) -> int: '''simple docstring''' lowercase : List[str] = ( AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' ) ) lowercase : str = tokenizer('''\n''' , add_special_tokens=__magic_name__ ).input_ids[0] lowercase , lowercase : int = get_blipa_config(__magic_name__ , eos_token_id=__magic_name__ ) lowercase : Optional[int] = BlipaForConditionalGeneration(__magic_name__ ).eval() lowercase : Union[str, Any] = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } lowercase , lowercase : List[Any] = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowercase : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu''' lowercase , lowercase , lowercase : int = load_model_and_preprocess( name=__magic_name__ , model_type=__magic_name__ , is_eval=__magic_name__ , device=__magic_name__ ) original_model.eval() print('''Done!''' ) # update state dict keys lowercase : str = original_model.state_dict() lowercase : Tuple = create_rename_keys(__magic_name__ ) for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowercase : List[Any] = state_dict.pop(__magic_name__ ) if key.startswith('''Qformer.bert''' ): lowercase : Optional[int] = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowercase : 
Optional[int] = key.replace('''self''' , '''attention''' ) if "opt_proj" in key: lowercase : Union[str, Any] = key.replace('''opt_proj''' , '''language_projection''' ) if "t5_proj" in key: lowercase : Dict = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''opt''' ): lowercase : Any = key.replace('''opt''' , '''language''' ) if key.startswith('''t5''' ): lowercase : int = key.replace('''t5''' , '''language''' ) lowercase : Optional[int] = val # read in qv biases read_in_q_v_bias(__magic_name__ , __magic_name__ ) lowercase , lowercase : Dict = hf_model.load_state_dict(__magic_name__ , strict=__magic_name__ ) assert len(__magic_name__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] lowercase : List[str] = load_demo_image() lowercase : Union[str, Any] = vis_processors['''eval'''](__magic_name__ ).unsqueeze(0 ).to(__magic_name__ ) lowercase : List[str] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) # create processor lowercase : List[str] = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=__magic_name__ , image_std=__magic_name__ ) lowercase : int = BlipaProcessor(image_processor=__magic_name__ , tokenizer=__magic_name__ ) lowercase : Any = processor(images=__magic_name__ , return_tensors='''pt''' ).pixel_values.to(__magic_name__ ) # make sure processor creates exact same pixel values assert torch.allclose(__magic_name__ , __magic_name__ ) original_model.to(__magic_name__ ) hf_model.to(__magic_name__ ) with torch.no_grad(): if "opt" in model_name: lowercase : Union[str, Any] = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits lowercase : Optional[int] = hf_model(__magic_name__ , __magic_name__ ).logits else: lowercase : List[Any] = original_model( {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits lowercase : Optional[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) lowercase : Union[str, Any] = hf_model(__magic_name__ , __magic_name__ , labels=__magic_name__ ).logits assert original_logits.shape == logits.shape print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": lowercase : Any = torch.tensor( [[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__magic_name__ ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": lowercase : Dict = torch.tensor( [[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__magic_name__ ) else: # cast to same type lowercase : str = logits.dtype assert torch.allclose(original_logits.to(__magic_name__ ) , __magic_name__ , atol=1e-2 ) print('''Looks ok!''' ) print('''Generating a caption...''' ) lowercase : Any = '''''' lowercase : str = tokenizer(__magic_name__ , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) lowercase : List[Any] = original_model.generate({'''image''': original_pixel_values} ) lowercase : str = hf_model.generate( __magic_name__ , __magic_name__ , do_sample=__magic_name__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('''Original generation:''' , __magic_name__ ) lowercase : Optional[int] = input_ids.shape[1] lowercase : List[str] = 
processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__magic_name__ ) lowercase : Tuple = [text.strip() for text in output_text] print('''HF generation:''' , __magic_name__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__magic_name__ ) hf_model.save_pretrained(__magic_name__ ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() lowerCAmelCase_ = [ 'blip2-opt-2.7b', 'blip2-opt-6.7b', 'blip2-opt-2.7b-coco', 'blip2-opt-6.7b-coco', 'blip2-flan-t5-xl', 'blip2-flan-t5-xl-coco', 'blip2-flan-t5-xxl', ] parser.add_argument( '--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) lowerCAmelCase_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
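# Hedged usage sketch (added for illustration): the `Blipa*` classes above are
# obfuscated spellings of the public BLIP-2 classes. After conversion, captioning
# an image looks roughly like this; "Salesforce/blip2-opt-2.7b" is the published
# reference checkpoint and is assumed here only as an example target.
import requests
import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
model.eval()

url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())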
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def snake_case( __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Union[str, Any] = old_name if "patch_embed" in old_name: lowercase , lowercase , lowercase : Dict = old_name.split('''.''' ) if layer == "0": lowercase : Tuple = old_name.replace('''0''' , '''convolution1''' ) elif layer == "1": lowercase : str = old_name.replace('''1''' , '''batchnorm_before''' ) elif layer == "3": lowercase : List[Any] = old_name.replace('''3''' , '''convolution2''' ) else: lowercase : List[str] = old_name.replace('''4''' , '''batchnorm_after''' ) if "network" in old_name and re.search(r'''\d\.\d''' , __magic_name__ ): lowercase : List[str] = r'''\b\d{2}\b''' if bool(re.search(__magic_name__ , __magic_name__ ) ): lowercase : Optional[int] = re.search(r'''\d\.\d\d.''' , __magic_name__ ).group() else: lowercase : Optional[Any] = re.search(r'''\d\.\d.''' , __magic_name__ ).group() if int(match[0] ) < 6: lowercase : Dict = old_name.replace(__magic_name__ , '''''' ) lowercase : int = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] ) lowercase : Any = '''intermediate_stages.''' + trimmed_name else: lowercase : List[Any] = old_name.replace(__magic_name__ , '''''' ) if int(match[2] ) < num_meta4D_last_stage: lowercase : Union[str, Any] = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] ) else: lowercase : List[str] = str(int(match[2] ) - num_meta4D_last_stage ) lowercase : List[str] = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index ) if "norm1" in old_name: lowercase : str = trimmed_name.replace('''norm1''' , '''layernorm1''' ) elif "norm2" in old_name: lowercase : Union[str, Any] = trimmed_name.replace('''norm2''' , '''layernorm2''' ) elif "fc1" in old_name: lowercase : Optional[int] = trimmed_name.replace('''fc1''' , '''linear_in''' ) elif "fc2" in old_name: lowercase : int = trimmed_name.replace('''fc2''' , '''linear_out''' ) lowercase : Any = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''' , __magic_name__ ): lowercase : List[Any] = old_name.replace('''network''' , '''intermediate_stages''' ) if "fc" in new_name: lowercase : int = new_name.replace('''fc''' , '''convolution''' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): lowercase : Union[str, Any] = new_name.replace('''norm1''' , '''batchnorm_before''' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): lowercase : Optional[int] = new_name.replace('''norm2''' , '''batchnorm_after''' ) if "proj" in new_name: lowercase : Tuple = new_name.replace('''proj''' , '''projection''' ) if "dist_head" in new_name: lowercase : Optional[Any] = new_name.replace('''dist_head''' , '''distillation_classifier''' ) elif "head" in new_name: lowercase : Tuple = new_name.replace('''head''' , '''classifier''' ) elif "patch_embed" in new_name: lowercase : List[str] = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": lowercase : List[str] = new_name.replace('''norm''' , '''layernorm''' ) lowercase : Optional[int] = 
'''efficientformer.''' + new_name else: lowercase : Any = '''efficientformer.encoder.''' + new_name return new_name def snake_case( __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' for key in checkpoint.copy().keys(): lowercase : Dict = checkpoint.pop(__magic_name__ ) lowercase : int = val return checkpoint def snake_case( ) -> Union[str, Any]: '''simple docstring''' lowercase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : int = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return image def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Any = torch.load(__magic_name__ , map_location='''cpu''' )['''model'''] lowercase : int = EfficientFormerConfig.from_json_file(__magic_name__ ) lowercase : Optional[Any] = EfficientFormerForImageClassificationWithTeacher(__magic_name__ ) lowercase : int = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] ) lowercase : List[str] = config.depths[-1] - config.num_metaad_blocks + 1 lowercase : List[Any] = convert_torch_checkpoint(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() lowercase : Union[str, Any] = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image lowercase : str = prepare_img() lowercase : Optional[Any] = 2_56 lowercase : Tuple = 2_24 lowercase : List[Any] = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , ) lowercase : Any = processor(images=__magic_name__ , return_tensors='''pt''' ).pixel_values # original processing pipeline lowercase : int = Compose( [ Resize(__magic_name__ , interpolation=pillow_resamplings['''bicubic'''] ), CenterCrop(__magic_name__ ), ToTensor(), Normalize(__magic_name__ , __magic_name__ ), ] ) lowercase : Any = image_transforms(__magic_name__ ).unsqueeze(0 ) assert torch.allclose(__magic_name__ , __magic_name__ ) lowercase : Optional[Any] = model(__magic_name__ ) lowercase : str = outputs.logits lowercase : Optional[int] = (1, 10_00) if "l1" in model_name: lowercase : Tuple = torch.Tensor( [-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] ) assert torch.allclose(logits[0, :10] , __magic_name__ , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: lowercase : Optional[int] = torch.Tensor( [-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] ) assert torch.allclose(logits[0, :10] , __magic_name__ , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: lowercase : Optional[int] = torch.Tensor( [-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] ) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) model.save_pretrained(__magic_name__ ) print(F"""Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(__magic_name__ ) print(F"""Processor successfully saved at {pytorch_dump_path}""" ) if push_to_hub: print('''Pushing model to the hub...''' ) model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add model''' , use_temp_dir=__magic_name__ , ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add image processor''' , use_temp_dir=__magic_name__ , ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to EfficientFormer pytorch checkpoint.', ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for EfficientFormer model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) parser.set_defaults(push_to_hub=True) lowerCAmelCase_ = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
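# Hedged inference sketch (added for illustration): a converted checkpoint loads
# through the usual transformers API. "snap-research/efficientformer-l1-300" is
# assumed to be the published l1 checkpoint; a local dump path works the same way.
import requests
import torch
from PIL import Image
from transformers import EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = EfficientFormerForImageClassificationWithTeacher.from_pretrained("snap-research/efficientformer-l1-300")
model.eval()

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # average of classifier and distillation heads
print(model.config.id2label[int(logits.argmax(-1))])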
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _A ( _lowerCamelCase ): def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = tokenizer lowercase : List[Any] = tokenizer.bos_token_id lowercase : Union[str, Any] = dataset lowercase : Union[str, Any] = seq_length lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences def __iter__( self : int ) -> int: """simple docstring""" lowercase : Dict = iter(self.dataset ) lowercase : Union[str, Any] = True while more_examples: lowercase , lowercase : Tuple = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(_A )['''content'''] ) buffer_len += len(buffer[-1] ) except StopIteration: lowercase : List[str] = False break lowercase : str = tokenizer(_A , truncation=_A )['''input_ids'''] lowercase : List[str] = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(_A ) , self.seq_length ): lowercase : int = all_token_ids[i : i + self.seq_length] if len(_A ) == self.seq_length: yield torch.tensor(_A ) def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : List[str] = {'''streaming''': True} lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ ) lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length ) lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size ) return eval_dataloader def snake_case( __magic_name__ ) -> str: '''simple docstring''' model.eval() lowercase : str = [] for step, batch in enumerate(__magic_name__ ): with torch.no_grad(): lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ ) lowercase : List[Any] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__magic_name__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) ) try: lowercase : Tuple = torch.exp(__magic_name__ ) except OverflowError: lowercase : List[str] = float('''inf''' ) return loss.item(), perplexity.item() # Setup Accelerator lowerCAmelCase_ = Accelerator() # Parse configuration lowerCAmelCase_ = HfArgumentParser(EvaluationArguments) lowerCAmelCase_ = parser.parse_args() set_seed(args.seed) # Logging lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowerCAmelCase_ = create_dataloader(args) # Prepare everything with our `accelerator`. lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args) logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
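# Hedged note (added): the evaluate loop above implements "perplexity is the
# exponential of the mean cross-entropy loss", with an overflow guard mirroring
# the try/except in the script. A minimal standalone rendering of that final
# step on made-up per-batch losses:
import torch

demo_losses = torch.tensor([2.1, 2.3, 1.9])  # hypothetical gathered batch losses
demo_loss = torch.mean(demo_losses)
try:
    demo_perplexity = torch.exp(demo_loss)
except OverflowError:
    demo_perplexity = float("inf")
print(f"demo loss: {demo_loss.item():.4f}, demo perplexity: {float(demo_perplexity):.4f}")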
import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class _A ( _lowerCamelCase ): def __init__( self : Optional[int] , *_A : Dict , **_A : str ) -> None: """simple docstring""" warnings.warn( '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ImageGPTImageProcessor instead.''' , _A , ) super().__init__(*_A , **_A )
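# Hedged migration sketch (added): the shim above only forwards to
# ImageGPTImageProcessor, so new code can instantiate the processor directly and
# avoid the deprecation warning. "openai/imagegpt-small" is the published
# checkpoint name, used here purely as an example.
from transformers import ImageGPTImageProcessor

image_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
# accepts the same inputs the deprecated ImageGPTFeatureExtractor did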
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> Optional[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def snake_case( ) -> int: '''simple docstring''' lowercase : List[str] = '''mock-s3-bucket''' lowercase : Optional[int] = F"""s3://{mock_bucket}""" lowercase : List[Any] = extract_path_from_uri(__magic_name__ ) assert dataset_path.startswith('''s3://''' ) is False lowercase : Optional[int] = '''./local/path''' lowercase : Dict = extract_path_from_uri(__magic_name__ ) assert dataset_path == new_dataset_path def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Tuple = is_remote_filesystem(__magic_name__ ) assert is_remote is True lowercase : int = fsspec.filesystem('''file''' ) lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} lowercase : List[Any] = input_paths[compression_fs_class.protocol] if input_path is None: lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__magic_name__ ) lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ ) assert isinstance(__magic_name__ , __magic_name__ ) lowercase : List[Any] = os.path.basename(__magic_name__ ) lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} lowercase : List[str] = compressed_file_paths[protocol] lowercase : str = '''dataset.jsonl''' lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}""" lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ ) assert fs.isfile(__magic_name__ ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ ) lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert 
hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(__magic_name__ ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def snake_case( ) -> List[Any]: '''simple docstring''' lowercase : List[Any] = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ ) with pytest.warns(__magic_name__ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(__magic_name__ ) == 1 assert ( str(warning_info[0].message ) == F"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
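# Hedged usage sketch (added): the compression filesystems exercised above are
# registered when `datasets.filesystems` is imported; each codec becomes a
# single-file filesystem whose `fo` argument points at the archive. The local
# path below is an assumption for illustration.
import fsspec
import datasets.filesystems  # noqa: F401 -- registers gzip/bz2/xz/zstd/lz4

gz_fs = fsspec.filesystem("gzip", fo="path/to/data.txt.gz")  # hypothetical file
with gz_fs.open(gz_fs.glob("*")[0], "r", encoding="utf-8") as f:
    print(f.read())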
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of n (sign is ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of n (sign is ignored)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of n via its string representation."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on inputs of increasing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
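# Worked check (added): all three implementations agree, e.g.
# 9045 -> 9 + 0 + 4 + 5 = 18, and the sign is ignored via abs().
if __name__ == "__main__":
    assert sum_of_digits(9045) == sum_of_digits_recursion(9045) == sum_of_digits_compact(9045) == 18
    assert sum_of_digits(-123) == 6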
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) class _A ( enum.Enum ): _UpperCamelCase : Union[str, Any] = 0 _UpperCamelCase : Any = 1 @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[Any] = '''generated''' def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]: """simple docstring""" super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]: """simple docstring""" lowercase : str = {} if truncation is not None: lowercase : Tuple = truncation lowercase : Tuple = generate_kwargs lowercase : Optional[Any] = {} if return_tensors is not None and return_type is None: lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: lowercase : Dict = return_type if clean_up_tokenization_spaces is not None: lowercase : Dict = clean_up_tokenization_spaces if stop_sequence is not None: lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) lowercase : List[str] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" return True def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict: """simple docstring""" lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _A ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) lowercase : List[Any] = ([prefix + arg for arg in args[0]],) lowercase : Dict = True elif isinstance(args[0] , _A ): lowercase : Optional[int] = (prefix + args[0],) lowercase : Union[str, Any] = False else: raise ValueError( f""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = super().__call__(*_A , **_A ) if ( isinstance(args[0] , _A ) and all(isinstance(_A , _A ) for el in args[0] ) and all(len(_A ) == 1 for res in result ) ): return [res[0] for res in result] return result def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A ) return inputs def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any: """simple docstring""" if self.framework == "pt": lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape elif self.framework == "tf": lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy() lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length ) lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) lowercase : int = self.model.generate(**_A , **_A ) lowercase : int = output_ids.shape[0] if self.framework == "pt": lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple: """simple docstring""" lowercase : Any = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: lowercase : Dict = { f"""{self.return_name}_text""": self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) } records.append(_A ) return records @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''summary''' def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]: """simple docstring""" return super().__call__(*_A , **_A ) def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool: """simple docstring""" if max_length < min_length: logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f"""consider decreasing max_length manually, e.g. 
summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(_lowerCamelCase ) class _A ( _lowerCamelCase ): _UpperCamelCase : List[str] = '''translation''' def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict: """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ): return self.tokenizer._build_translation_inputs( *_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A ) else: return super()._parse_and_tokenize(*_A , truncation=_A ) def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]: """simple docstring""" lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A ) if src_lang is not None: lowercase : Optional[Any] = src_lang if tgt_lang is not None: lowercase : Dict = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. lowercase : Dict = kwargs.get('''task''' , self.task ) lowercase : List[str] = task.split('''_''' ) if task and len(_A ) == 4: # translation, XX, to YY lowercase : Any = items[1] lowercase : List[str] = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]: """simple docstring""" return super().__call__(*_A , **_A )
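# Hedged usage sketch (added): these pipeline classes are normally constructed
# through the `pipeline` factory rather than instantiated directly. The model
# names below are published hub checkpoints, assumed here only as examples.
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
print(summarizer("The tower is 324 metres tall, about the same height as an 81-storey building.",
                 min_length=5, max_length=20))

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?"))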
from random import shuffle import tensorflow as tf from numpy import array def snake_case( __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' lowercase : List[str] = int(__magic_name__ ) assert noofclusters < len(__magic_name__ ) # Find out the dimensionality lowercase : int = len(vectors[0] ) # Will help select random centroids from among the available vectors lowercase : Dict = list(range(len(__magic_name__ ) ) ) shuffle(__magic_name__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. lowercase : int = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION lowercase : List[str] = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points lowercase : str = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(__magic_name__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values lowercase : Any = tf.placeholder('''float64''' , [dim] ) lowercase : List[str] = [] for centroid in centroids: cent_assigns.append(tf.assign(__magic_name__ , __magic_name__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) lowercase : Optional[Any] = [tf.Variable(0 ) for i in range(len(__magic_name__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value lowercase : Optional[int] = tf.placeholder('''int32''' ) lowercase : Tuple = [] for assignment in assignments: cluster_assigns.append(tf.assign(__magic_name__ , __magic_name__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input lowercase : Optional[int] = tf.placeholder('''float''' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors lowercase : List[str] = tf.reduce_mean(__magic_name__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input lowercase : Union[str, Any] = tf.placeholder('''float''' , [dim] ) lowercase : Optional[int] = tf.placeholder('''float''' , [dim] ) lowercase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__magic_name__ , __magic_name__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input lowercase : List[Any] = tf.placeholder('''float''' , [noofclusters] ) lowercase : Dict = tf.argmin(__magic_name__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. lowercase : Union[str, Any] = tf.initialize_all_variables() # Initialize all variables sess.run(__magic_name__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. lowercase : str = 1_00 for _ in range(__magic_name__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. 
# Iterate over each vector for vector_n in range(len(__magic_name__ ) ): lowercase : Dict = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. lowercase : List[Any] = [ sess.run(__magic_name__ , feed_dict={va: vect, va: sess.run(__magic_name__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input lowercase : str = sess.run( __magic_name__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(__magic_name__ ): # Collect all the vectors assigned to this cluster lowercase : Any = [ vectors[i] for i in range(len(__magic_name__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location lowercase : Dict = sess.run( __magic_name__ , feed_dict={mean_input: array(__magic_name__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments lowercase : Optional[Any] = sess.run(__magic_name__ ) lowercase : Any = sess.run(__magic_name__ ) return centroids, assignments
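# Hedged usage sketch (added for illustration): the routine above targets the
# TensorFlow 1.x graph/session API and still uses the pre-1.0 aliases tf.sub and
# tf.initialize_all_variables (renamed to tf.subtract and
# tf.global_variables_initializer in TF 1.0), so it assumes a very old TF
# install, and the file's obfuscated assignment targets (`lowercase`) would also
# need restoring before it runs. With those caveats, the intended call shape is:
from random import uniform

if __name__ == "__main__":
    points = [array([uniform(0.0, 10.0), uniform(0.0, 10.0)]) for _ in range(30)]
    centroids, assignments = snake_case(points, 3)  # partition into 3 clusters
    print("centroids:", centroids)
    print("assignments:", assignments)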
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase_ = get_logger(__name__) class _A : _UpperCamelCase : int = '''dummy_data''' _UpperCamelCase : Tuple = '''datasets''' _UpperCamelCase : Optional[int] = False def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict: """simple docstring""" lowercase : Tuple = 0 lowercase : List[Any] = dataset_name lowercase : int = cache_dir lowercase : str = use_local_dummy_data lowercase : Union[str, Any] = config # download_callbacks take a single url as input lowercase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root lowercase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general lowercase : Union[str, Any] = str(_A ) # to be downloaded lowercase : Tuple = None lowercase : Optional[int] = None @property def __a ( self : str ) -> Dict: """simple docstring""" if self._dummy_file is None: lowercase : Optional[Any] = self.download_dummy_data() return self._dummy_file @property def __a ( self : int ) -> Optional[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name ) @property def __a ( self : List[Any] ) -> int: """simple docstring""" return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' ) def __a ( self : str ) -> int: """simple docstring""" lowercase : str = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) lowercase : List[str] = cached_path( _A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A ) return os.path.join(_A , self.dummy_file_name ) @property def __a ( self : str ) -> Tuple: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def __a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" if self._bucket_url is None: lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) ) return self._bucket_url @property def __a ( self : Tuple ) -> List[str]: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] ) def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested lowercase : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned lowercase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(_A , _A ): return self.create_dummy_data_dict(_A , _A ) elif isinstance(_A , (list, tuple) ): return self.create_dummy_data_list(_A , _A ) else: return self.create_dummy_data_single(_A , _A ) def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]: """simple docstring""" return self.download_and_extract(_A ) def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]: """simple docstring""" return path def __a ( self : List[str] ) -> str: """simple docstring""" return {} def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase : Any = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(_A , _A ): for single_url in single_urls: download_callback(_A ) else: lowercase : List[str] = single_urls download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(_A , _A ): lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls] else: lowercase : int = single_urls lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) lowercase : str = value # make sure that values are unique if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique lowercase : str = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple: """simple docstring""" lowercase : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url ) lowercase : str = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): lowercase : List[str] = [data_url[0]] * len(_A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) ) dummy_data_list.append(_A ) return dummy_data_list def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(_A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them 
with urllib.parse.quote_plus lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) ) if os.path.exists(_A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def __a ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def __a ( self : Any ) -> Dict: """simple docstring""" pass def __a ( self : int , _A : Optional[Any] ) -> Dict: """simple docstring""" def _iter_archive_members(_A : Optional[int] ): # this preserves the order of the members inside the ZIP archive lowercase : int = Path(self.dummy_file ).parent lowercase : List[str] = path.relative_to(_A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: lowercase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(_A ) lowercase : Tuple = Path(_A ) lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ): yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' ) def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]: """simple docstring""" if not isinstance(_A , _A ): lowercase : Dict = [paths] for path in paths: if os.path.isfile(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): return yield path else: for dirpath, dirnames, filenames in os.walk(_A ): if os.path.basename(_A ).startswith(('''.''', '''__''') ): continue dirnames.sort() for filename in sorted(_A ): if filename.startswith(('''.''', '''__''') ): continue yield os.path.join(_A , _A )
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n (trial division)."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of the distinct prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if every element of the iterable is identical (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Project Euler 47: first of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
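# Worked check (added): for n = 3 the answer given in Project Euler 47 is 644,
# since 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19 each have
# exactly three distinct prime factors.
if __name__ == "__main__":
    assert unique_prime_factors(644) == {2, 7, 23}
    assert solution(3) == 644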
def bfs(graph: list, s: int, t: int, parent: list) -> bool:
    """Breadth-first search for an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph: list, source: int, sink: int) -> int:
    """Edmonds-Karp style Ford-Fulkerson: maximum flow from source to sink."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
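# Worked check (added): on the classic CLRS example network above, the maximum
# flow from node 0 to node 5 is 23. Note that ford_fulkerson mutates its graph
# argument into the residual graph, so the check runs on a fresh copy.
import copy

demo_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(copy.deepcopy(demo_graph), 0, 5) == 23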
def solution() -> int:
    """Project Euler 9: product a*b*c of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
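# Worked check (added): the unique triplet is (a, b, c) = (200, 375, 425), since
# 200^2 + 375^2 = 180625 = 425^2 and 200 + 375 + 425 = 1000, giving the product
# 200 * 375 * 425 = 31875000.
if __name__ == "__main__":
    assert solution() == 31875000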
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedy longest-match-first: shrink the window from the right
            # until the substring is found in the vocabulary.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for CPM-Ant: jieba word segmentation followed by greedy
    WordPiece matching against the vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the literal space and newline characters onto the special tokens.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation, then WordPiece per segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence markers."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the on-disk special-token spellings before writing.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
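# Standalone sketch of the greedy longest-match-first loop implemented by
# WordpieceTokenizer above; the toy vocabulary is an assumption chosen only to
# make the example self-contained and runnable offline.
def greedy_longest_match(word: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(word):
        end, match = len(word), None
        while start < end:  # shrink the window from the right until a hit
            if word[start:end] in vocab:
                match = word[start:end]
                break
            end -= 1
        if match is None:  # no prefix matched: emit <unk>, advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(match)
            start = end
    return tokens


assert greedy_longest_match("unhappily", {"un", "happi", "ly", "happy"}) == ["un", "happi", "ly"]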
from collections.abc import Callable


class Heap:
    """
    A generic heap parameterized by a key function. With the default
    (identity) key, the item with the largest evaluated value sits on top;
    pass e.g. ``key=lambda x: -x`` for min-heap behavior.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items as [item, evaluated_value] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the bookkeeping required to swap two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the evaluated values stored at the two indexes."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns the index that should be the parent, per the desired ordering,
        among the given index and both of its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        last = self.arr[self.size - 1]
        self.size -= 1
        # Move the last element into the vacated slot (skipped when the deleted
        # item already occupied the last slot), then restore the heap property.
        # Ideally only one of the two heapify calls will make any change.
        if index < self.size:
            self.arr[index] = last
            self.pos_map[last[0]] = index
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top (item, evaluated_value) tuple if the heap is non-empty."""
        return tuple(self.arr[0]) if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns and removes the top (item, evaluated_value) tuple if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


if __name__ == "__main__":
    import doctest

    doctest.testmod()
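# REPL-style usage sketch for the Heap above. With the default identity key the
# largest evaluated value surfaces first; pass key=lambda x: -x for a min-heap.
h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
assert h.get_top() == (7, 37)
h.update_item(6, 99)               # repositions item 6 after its value changes
assert h.extract_top() == (6, 99)
assert h.get_top() == (7, 37)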
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)

    # Nothing to do if enough images were already downloaded.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Widen the query until the index returns enough candidates (or a hard cap).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
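# Offline sketch of the query-widening strategy used in retrieve(): the
# requested result count grows geometrically (factor 1.5) until the backend
# returns enough hits or a hard cap is reached. The stub backend below is an
# assumption standing in for the clip_retrieval service.
def widen_until_enough(query_fn, num_class_images, factor=1.5, cap=1e4):
    num_images = int(factor * num_class_images)
    while True:
        results = query_fn(num_images)
        if len(results) >= factor * num_class_images or num_images > cap:
            return results
        num_images = int(factor * num_images)


fake_index = list(range(500))  # pretend the index holds 500 matching images
assert len(widen_until_enough(lambda n: fake_index[:n], 200)) == 300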
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCAmelCase_ = logging.get_logger(__name__) if is_vision_available(): import PIL class _A ( _lowerCamelCase ): _UpperCamelCase : List[Any] = ['''pixel_values'''] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = True , **_A : Tuple , ) -> None: """simple docstring""" super().__init__(**_A ) lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224} lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) lowercase : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase : Dict = get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' ) lowercase : List[Any] = do_resize lowercase : Union[str, Any] = size lowercase : Dict = resample lowercase : Optional[Any] = do_center_crop lowercase : Optional[Any] = crop_size lowercase : Optional[int] = do_rescale lowercase : int = rescale_factor lowercase : Optional[int] = do_normalize lowercase : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase : Any = image_std if image_std is not None else OPENAI_CLIP_STD lowercase : Tuple = do_convert_rgb def __a ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray: """simple docstring""" lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) lowercase : Dict = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __a ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: """simple docstring""" lowercase : Dict = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def __a ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[Any] , ) -> List[Any]: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def __a ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __a ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : int = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , **_A : Tuple , ) -> PIL.Image.Image: """simple docstring""" lowercase : Any = do_resize if do_resize is not None else self.do_resize lowercase : Tuple = size if size is not None else self.size lowercase : List[str] = get_size_dict(_A , param_name='''size''' , default_to_square=_A ) lowercase : Any = resample if resample is not None else self.resample lowercase : str = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : str = crop_size if crop_size is not None else self.crop_size lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A ) lowercase : int = do_rescale if do_rescale is not None else self.do_rescale lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : Tuple = do_normalize if do_normalize is not None else self.do_normalize lowercase : Tuple = image_mean if image_mean is not None else self.image_mean lowercase : Optional[Any] = image_std if image_std is not None else self.image_std lowercase : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase : int = [convert_to_rgb(_A ) for image in images] # All transformations expect numpy arrays. 
lowercase : Dict = [to_numpy_array(_A ) for image in images] if do_resize: lowercase : List[str] = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: lowercase : Tuple = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: lowercase : List[Any] = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: lowercase : List[Any] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] lowercase : Optional[int] = [to_channel_dimension_format(_A , _A ) for image in images] lowercase : Tuple = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
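# Minimal sketch of the "shortest_edge" resize rule this processor relies on:
# scale so the shorter side hits the target while preserving the aspect ratio.
# This is a simplified stand-in for get_resize_output_image_size with
# default_to_square=False, not the library function itself.
def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple:
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(shortest_edge * long / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)


assert shortest_edge_size(480, 640, 224) == (224, 298)  # 480 -> 224, 640 scales along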
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
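# The launcher's core trick is generic: import an arbitrary script as a module
# and rewrite sys.argv before handing control to its entry point. A minimal
# sketch of the same pattern, kept commented out because the script path and
# argv below are illustrative placeholders, not real files:
#
# import importlib
# import sys
# from pathlib import Path
#
# fpath = Path("examples/run_glue.py")            # hypothetical target script
# sys.path.append(str(fpath.parent.resolve()))    # make it importable
# mod = importlib.import_module(fpath.stem)       # module name = filename stem
# sys.argv = ["run_glue.py", "--tpu_num_cores", "8"]
# mod._mp_fn(0)                                   # script sees the patched argv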
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
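# Worked check with the classic amicable pair (220, 284):
# proper divisors of 220 sum to 1+2+4+5+10+11+20+22+44+55+110 = 284, and
# proper divisors of 284 sum to 1+2+4+71+142 = 220, with 220 != 284.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220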
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def snake_case( __magic_name__ ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__magic_name__ ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class _A ( _lowerCamelCase ): _UpperCamelCase : str = ['''pixel_values'''] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None: """simple docstring""" super().__init__(**_A ) lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224} lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' ) lowercase : List[str] = do_resize lowercase : Optional[Any] = size lowercase : List[str] = do_center_crop lowercase : List[Any] = crop_size lowercase : str = resample lowercase : Tuple = do_rescale lowercase : Any = rescale_factor lowercase : Tuple = do_normalize lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Tuple = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" in size: lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A ) elif "height" in size and "width" in size: lowercase : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray: """simple docstring""" lowercase : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: """simple docstring""" if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. lowercase : Union[str, Any] = to_numpy_array(_A ) if do_resize: lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A ) if do_center_crop: lowercase : Optional[int] = self.center_crop(_A , size=_A ) if do_rescale: lowercase : Tuple = self.rescale(image=_A , scale=_A ) if do_normalize: lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A ) lowercase : Any = to_channel_dimension_format(_A , _A ) return image def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image: """simple docstring""" lowercase : str = do_resize if do_resize is not None else self.do_resize lowercase : Optional[Any] = resample if resample is not None else self.resample lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : str = do_rescale if do_rescale is not None else self.do_rescale lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean lowercase : Optional[Any] = image_std if image_std is not None else self.image_std lowercase : str = size if size is not None else self.size lowercase : Any = get_size_dict(_A , default_to_square=_A ) lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size lowercase : str = get_size_dict(_A , param_name='''crop_size''' ) if not valid_images(_A 
): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) lowercase : Union[str, Any] = make_batched(_A ) lowercase : Dict = [ [ self._preprocess_image( image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , ) for img in video ] for video in videos ] lowercase : Tuple = {'''pixel_values''': videos} return BatchFeature(data=_A , tensor_type=_A )
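# Sketch of the batching rule implemented by the make_batched helper above: a
# bare frame becomes one single-frame video, a flat list of frames becomes one
# video, and a nested list is already a batch. The numpy isinstance check
# approximates is_valid_image for this offline illustration.
import numpy as np


def make_batched_sketch(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos  # already List[List[frame]]
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], np.ndarray):
        return [videos]  # one video: wrap into a batch of one
    if isinstance(videos, np.ndarray):
        return [[videos]]  # single frame: wrap twice
    raise ValueError(f"Could not make batched video from {videos}")


frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(make_batched_sketch(frame)) == 1
assert len(make_batched_sketch([frame, frame])[0]) == 2
assert len(make_batched_sketch([[frame], [frame, frame]])) == 2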
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
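# The _LazyModule above defers the heavy torch imports until an attribute is
# first accessed. A minimal standalone sketch of the same idea via module-level
# __getattr__ (PEP 562); it must live inside a package, and the names here are
# illustrative rather than the actual transformers mechanism.
import importlib

_lazy_structure = {"configuration_ernie": ["ErnieConfig", "ErnieOnnxConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}


def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")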
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Optional[int] = AltDiffusionPipeline _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : int = TEXT_TO_IMAGE_IMAGE_PARAMS def __a ( self : Optional[int] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) lowercase : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowercase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0 ) lowercase : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowercase : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , ) lowercase : Union[str, Any] = CLIPTextModel(_A ) lowercase : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowercase : Optional[int] = 77 lowercase : List[Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __a ( self : Union[str, Any] , _A : int , _A : Dict=0 ) -> Any: """simple docstring""" if str(_A ).startswith('''mps''' ): lowercase : str = torch.manual_seed(_A ) else: lowercase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A ) lowercase : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __a ( self : Dict ) -> Dict: """simple docstring""" 
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def __a ( self : Dict ) -> List[str]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __a ( self : str ) -> Any: """simple docstring""" lowercase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase : List[str] = self.get_dummy_components() torch.manual_seed(0 ) lowercase : int = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder lowercase : Any = RobertaSeriesModelWithTransformation(_A ) lowercase : List[Any] = text_encoder lowercase : Optional[Any] = AltDiffusionPipeline(**_A ) lowercase : Optional[Any] = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) lowercase : Union[str, Any] = self.get_dummy_inputs(_A ) lowercase : int = '''A photo of an astronaut''' lowercase : List[str] = alt_pipe(**_A ) lowercase : int = output.images lowercase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase : Optional[Any] = np.array( [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self : Optional[int] ) -> List[str]: """simple docstring""" lowercase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase : Tuple = self.get_dummy_components() lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_A ) torch.manual_seed(0 ) lowercase : List[str] = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder lowercase : int = RobertaSeriesModelWithTransformation(_A ) lowercase : str = text_encoder lowercase : Dict = AltDiffusionPipeline(**_A ) lowercase : Union[str, Any] = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) lowercase : Union[str, Any] = self.get_dummy_inputs(_A ) lowercase : Union[str, Any] = alt_pipe(**_A ) lowercase : Optional[Any] = output.images lowercase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase : Union[str, Any] = np.array( [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _A ( unittest.TestCase ): def __a ( self : Tuple ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Optional[int] ) -> List[str]: """simple docstring""" lowercase : Optional[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_A ) lowercase : Any = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) lowercase : Any = '''A painting of a squirrel eating a burger''' lowercase : Dict = torch.manual_seed(0 ) lowercase : int = alt_pipe([prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) lowercase : Union[str, Any] = output.images lowercase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase : Dict = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 
0.0_586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self : Optional[Any] ) -> Dict: """simple docstring""" lowercase : Union[str, Any] = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) lowercase : str = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_A , safety_checker=_A ) lowercase : Dict = alt_pipe.to(_A ) alt_pipe.set_progress_bar_config(disable=_A ) lowercase : Dict = '''A painting of a squirrel eating a burger''' lowercase : str = torch.manual_seed(0 ) lowercase : Optional[int] = alt_pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='''numpy''' ) lowercase : Tuple = output.images lowercase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase : List[Any] = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
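# The pipeline tests above compare a 3x3 corner "fingerprint" of the generated
# image (last channel only) against hard-coded reference values. A minimal,
# self-contained sketch of that check with stand-in data:
import numpy as np

image = np.random.rand(1, 512, 512, 3)         # stand-in for pipeline output
image_slice = image[0, -3:, -3:, -1]           # bottom-right 3x3 patch, last channel
expected_slice = image_slice.flatten().copy()  # pretend these are the stored refs
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2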
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree and iterates through each branch using DFS;
    each state has exactly two children (exclude or include sequence[index]).
    Terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
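# Equivalent non-recursive enumeration via bitmasks, useful as a sanity check:
# each of the 2**n masks selects one subsequence, so create_state_space_tree
# prints exactly 2**n lines (16 for [3, 1, 2, 4]).
def subsequences_by_bitmask(sequence: list) -> list:
    n = len(sequence)
    return [[sequence[i] for i in range(n) if mask >> i & 1] for mask in range(1 << n)]


assert len(subsequences_by_bitmask([3, 1, 2, 4])) == 16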
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
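# Usage sketch for WhisperProcessor, kept commented out because it downloads
# pretrained weights; "openai/whisper-tiny" is the standard small checkpoint,
# and the shapes noted are those of the 80-bin, 30-second Whisper frontend.
#
# from transformers import WhisperProcessor
# import numpy as np
#
# processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
# batch = processor(audio=audio, sampling_rate=16000, text="hello", return_tensors="pt")
# batch["input_features"].shape  # torch.Size([1, 80, 3000])
# batch["labels"]                # token ids for the transcript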
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
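# Sketch of the dynamic-range compression performed at the end of
# _np_extract_fbank_features: floor the log-mel values at (max - 8), then
# shift/scale toward [-1, 1]. Random data stands in for a real spectrogram.
import numpy as np

log_spec = np.log10(np.random.rand(80, 3000) + 1e-10)  # stand-in log-mel values
floored = np.maximum(log_spec, log_spec.max() - 8.0)   # clamp the dynamic range to 8
normalized = (floored + 4.0) / 4.0                     # same affine map as above
assert float(normalized.max() - normalized.min()) <= 2.0  # a range of 8 maps to 2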
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
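# Usage sketch for from_encoder_decoder_configs; BertConfig is used purely as
# a convenient concrete PretrainedConfig (any encoder/decoder configs would
# do), so this pairing is an illustration, not a prescription.
#
# from transformers import BertConfig, EncoderDecoderConfig
#
# encoder = BertConfig()
# decoder = BertConfig()
# config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
# assert config.decoder.is_decoder            # set by the classmethod
# assert config.decoder.add_cross_attention   # enables cross-attention layers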
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any: """simple docstring""" lowercase : str = parent lowercase : Optional[Any] = batch_size lowercase : Union[str, Any] = seq_length lowercase : str = is_training lowercase : str = use_input_lengths lowercase : List[Any] = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Dict = sinusoidal_embeddings lowercase : Any = causal lowercase : str = asm lowercase : Optional[Any] = n_langs lowercase : Dict = vocab_size lowercase : Dict = n_special lowercase : List[Any] = hidden_size lowercase : str = num_hidden_layers lowercase : int = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : Optional[int] = type_sequence_label_size lowercase : List[str] = initializer_range lowercase : List[str] = num_labels lowercase : int = num_choices lowercase : int = summary_type lowercase : Tuple = use_proj lowercase : Union[str, Any] = scope lowercase : List[str] = bos_token_id def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : str = None if self.use_input_lengths: lowercase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Union[str, Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : Union[str, Any] = None lowercase : List[str] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self : Any ) -> List[Any]: 
"""simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]: """simple docstring""" lowercase : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , lengths=_A , langs=_A ) lowercase : Dict = model(_A , langs=_A ) lowercase : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Any = model(_A , start_positions=_A , end_positions=_A ) lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict: """simple docstring""" lowercase : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) lowercase : Tuple = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) lowercase : Optional[int] = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A ) ((lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int: """simple docstring""" lowercase : List[str] = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict: """simple docstring""" lowercase : Optional[Any] = self.num_labels lowercase : Tuple = XLMForTokenClassification(_A ) model.to(_A ) model.eval() lowercase : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]: """simple docstring""" lowercase : int = self.num_choices lowercase : List[Any] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCamelCase : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCamelCase : Tuple = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the 
slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) lowercase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def __a ( self : Any ) -> List[str]: """simple docstring""" lowercase : List[str] = XLMModelTester(self ) lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_A ) def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_A ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_A ) def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_A ) def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_A ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_A ) def __a ( self : Any ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_A ) def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_A ): # adds PAD dummy token lowercase : List[Any] = min_length + idx + 1 lowercase : str = min_length + idx + 1 lowercase : Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) ) def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_A ): # adds PAD dummy token lowercase 
: Union[str, Any] = min_length + idx + 1 lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , ) pass @slow def __a ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Any = XLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class _A ( unittest.TestCase ): @slow def __a ( self : Any ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(_A ) lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president lowercase : List[str] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase : Dict = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
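# Editor's sketch (hedged, not part of the original test file): the shape
# bookkeeping behind the generate-time attention checks above. Pure arithmetic,
# no model required; every number below is an illustrative assumption.
batch_size, num_beam_groups, num_attention_heads = 2, 1, 4
min_length, idx = 5, 0  # idx-th generation step past min_length
tgt_len = src_len = min_length + idx + 1  # one PAD dummy token added per step
expected_shape = (batch_size * num_beam_groups, num_attention_heads, tgt_len, src_len)
assert expected_shape == (2, 4, 6, 6)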
308
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class _A ( unittest.TestCase ): def __init__( self : List[str] , _A : int , _A : Tuple=7 , _A : Dict=3 , _A : str=30 , _A : Dict=400 , _A : Tuple=True , _A : Union[str, Any]=None , _A : Optional[int]=True , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : List[str]=True , _A : Union[str, Any]=1 / 255 , _A : Any=True , ) -> Dict: """simple docstring""" lowercase : int = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} lowercase : Union[str, Any] = parent lowercase : List[Any] = batch_size lowercase : List[Any] = num_channels lowercase : List[Any] = min_resolution lowercase : str = max_resolution lowercase : Union[str, Any] = do_resize lowercase : List[Any] = size lowercase : Tuple = do_normalize lowercase : str = image_mean lowercase : Optional[Any] = image_std lowercase : Optional[Any] = do_rescale lowercase : Dict = rescale_factor lowercase : Tuple = do_pad def __a ( self : List[str] ) -> List[str]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __a ( self : Optional[Any] , _A : str , _A : str=False ) -> List[str]: """simple docstring""" if not batched: lowercase : int = image_inputs[0] if isinstance(_A , Image.Image ): lowercase , lowercase : List[str] = image.size else: lowercase , lowercase : List[Any] = image.shape[1], image.shape[2] if w < h: lowercase : str = int(self.size['''shortest_edge'''] * h / w ) lowercase : List[str] = self.size['''shortest_edge'''] elif w > h: lowercase : Any = self.size['''shortest_edge'''] lowercase : Union[str, Any] = int(self.size['''shortest_edge'''] * w / h ) else: lowercase : str = self.size['''shortest_edge'''] lowercase : str = self.size['''shortest_edge'''] else: lowercase : Any = [] for image in image_inputs: lowercase , lowercase : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase : Optional[int] = max(_A , key=lambda _A : item[0] )[0] lowercase : Optional[int] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _A ( _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Optional[int] = DeformableDetrImageProcessor if is_vision_available() else None def __a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase : Tuple = DeformableDetrImageProcessingTester(self ) @property def __a ( self : Optional[int] ) -> Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Any ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) 
self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) lowercase : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , _A ) def __a ( self : List[str] ) -> Optional[Any]: """simple docstring""" pass def __a ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input lowercase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase , lowercase : str = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase , lowercase : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A ) lowercase : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __a ( self : Dict ) -> Tuple: """simple docstring""" lowercase : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input lowercase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase , lowercase : Optional[int] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase : int = image_processing(_A , return_tensors='''pt''' ).pixel_values lowercase , lowercase : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input lowercase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase , lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, 
expected_height, expected_width) , ) # Test batched lowercase : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values lowercase , lowercase : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: lowercase : Tuple = json.loads(f.read() ) lowercase : Any = {'''image_id''': 39_769, '''annotations''': target} # encode them lowercase : str = DeformableDetrImageProcessor() lowercase : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values lowercase : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) lowercase : int = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) ) # verify area lowercase : Union[str, Any] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes lowercase : str = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) lowercase : str = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) ) # verify image_id lowercase : Dict = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd lowercase : int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels lowercase : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size lowercase : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size lowercase : List[str] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def __a ( self : Tuple ) -> str: """simple docstring""" lowercase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: lowercase : List[Any] = json.loads(f.read() ) lowercase : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} lowercase : Optional[int] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowercase : List[Any] = DeformableDetrImageProcessor(format='''coco_panoptic''' ) lowercase : Dict = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values lowercase : List[Any] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) lowercase : List[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) ) # verify area lowercase : int = 
torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes lowercase : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) lowercase : List[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) ) # verify image_id lowercase : Optional[Any] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd lowercase : str = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels lowercase : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks lowercase : Optional[int] = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size lowercase : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size lowercase : List[str] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
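# Editor's sketch (hedged): minimal standalone use of the processor exercised
# above. Assumes transformers with vision extras and Pillow are installed;
# "cat.png" is a placeholder path, not a file shipped with the tests.
from PIL import Image
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor()  # resize + rescale + normalize + pad by default
image = Image.open("cat.png")
encoding = processor(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, H, W) after shortest/longest-edge resizing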
308
def solution(length: int = 50) -> int:
    """Count the ways a row of ``length`` units can be filled with unit
    squares and tiles of length two, three, or four."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
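# Editor's cross-check (hedged): the count above also satisfies the simpler
# recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = 1 -- place a
# unit square or a tile of length 2, 3, or 4 at the left end of the row.
from functools import lru_cache


@lru_cache(maxsize=None)
def ways(n: int) -> int:
    if n < 0:
        return 0
    if n == 0:
        return 1
    return ways(n - 1) + ways(n - 2) + ways(n - 3) + ways(n - 4)


assert all(ways(n) == solution(n) for n in range(20))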
308
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _A ( unittest.TestCase ): def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[Any]=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : str=30 , _A : int=400 , _A : Any=True , _A : Tuple=None , _A : Tuple=True , _A : str=None , _A : Optional[int]=True , ) -> Tuple: """simple docstring""" lowercase : Tuple = size if size is not None else {'''shortest_edge''': 20} lowercase : Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} lowercase : Union[str, Any] = parent lowercase : List[Any] = batch_size lowercase : Tuple = num_channels lowercase : Dict = image_size lowercase : str = min_resolution lowercase : List[str] = max_resolution lowercase : Union[str, Any] = do_resize lowercase : Optional[int] = size lowercase : Union[str, Any] = do_center_crop lowercase : Optional[int] = crop_size lowercase : str = do_flip_channel_order def __a ( self : Dict ) -> Union[str, Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _A ( _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Dict = MobileViTImageProcessor if is_vision_available() else None def __a ( self : int ) -> Optional[Any]: """simple docstring""" lowercase : List[Any] = MobileViTImageProcessingTester(self ) @property def __a ( self : Any ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : int ) -> str: """simple docstring""" lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_flip_channel_order''' ) ) def __a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __a ( self : List[str] ) -> Dict: """simple docstring""" pass def __a ( self : str ) -> Optional[int]: """simple docstring""" lowercase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input lowercase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __a ( self : str ) -> Dict: """simple docstring""" lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input lowercase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase : str = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input lowercase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
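# Editor's sketch (hedged): standalone use of the processor tested above; note
# MobileViT flips channel order (RGB -> BGR) instead of mean/std normalization.
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = Image.new("RGB", (32, 24))  # synthetic placeholder image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # (1, 3, 18, 18) after resize + center crop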
308
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum through the matrix in ``filename``, moving
    only up, down, and right, starting in any cell of the left column and
    finishing in any cell of the right column."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
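# Editor's check (hedged): the published 5x5 example grid for this problem has
# a minimal three-way path sum of 994 (201 -> 96 -> 342 -> 234 -> 103 -> 18).
# solution() reads from disk, so the grid is written to a temporary file first.
import tempfile

_example = (
    "131,673,234,103,18\n"
    "201,96,342,965,150\n"
    "630,803,746,422,111\n"
    "537,699,497,121,956\n"
    "805,732,524,37,331\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as _tmp:
    _tmp.write(_example)
assert solution(_tmp.name) == 994
os.remove(_tmp.name)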
308
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
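# Editor's sketch (hedged): instantiating the config class defined above with
# its defaults; attribute_map aliases hidden_size to embed_dim and so on.
config = Swin2SRConfig()
print(config.model_type)                  # "swin2sr"
print(config.num_layers, config.upscale)  # 6 2
print(config.hidden_size)                 # 180, resolved via attribute_map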
308
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check mt5-small's masked LM loss against a reference score."""
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
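# Editor's sketch (hedged): the same check in PyTorch for readers without
# TensorFlow; the loss is the mean cross-entropy over the label tokens, and the
# reference value above is not asserted here.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids=input_ids, labels=labels).loss
print(-loss.item())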
308
1
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
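# Editor's sketch (hedged, illustrative reimplementation): ModuleUtilsMixin's
# get_extended_attention_mask turns a (batch, seq) 0/1 padding mask into an
# additive bias that broadcasts over the attention scores.
def _extended_attention_mask_sketch(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    ext = mask[:, None, None, :].to(dtype)       # (batch, 1, 1, seq)
    return (1.0 - ext) * torch.finfo(dtype).min  # 0 where attended, huge negative where masked


print(_extended_attention_mask_sketch(torch.tensor([[1, 1, 0]])))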
308
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a 0/1 grid (1 = walkable) with unit edge costs."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
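# Editor's usage example (hedged): 1 marks walkable cells and 0 marks walls,
# matching the `next_node == 1` check above.
_grid = np.array(
    [
        [1, 1, 1],
        [0, 0, 1],
        [1, 1, 1],
    ]
)
_dist, _path = dijkstra(_grid, (0, 0), (2, 0), allow_diagonal=False)
print(_dist)  # 6.0 -- around the wall: (0,0)->(0,1)->(0,2)->(1,2)->(2,2)->(2,1)->(2,0)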
308
1
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase_ = 2_56 class _A ( _lowerCamelCase ): _UpperCamelCase : str = ['''melgan'''] def __init__( self : List[str] , _A : SpectrogramNotesEncoder , _A : SpectrogramContEncoder , _A : TaFilmDecoder , _A : DDPMScheduler , _A : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: """simple docstring""" super().__init__() # From MELGAN lowercase : Optional[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase : str = 4.0 # Largest value for most examples lowercase : Union[str, Any] = 128 self.register_modules( notes_encoder=_A , continuous_encoder=_A , decoder=_A , scheduler=_A , melgan=_A , ) def __a ( self : str , _A : int , _A : Union[str, Any]=(-1.0, 1.0) , _A : str=False ) -> str: """simple docstring""" lowercase , lowercase : Any = output_range if clip: lowercase : List[str] = torch.clip(_A , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase : Optional[Any] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def __a ( self : List[str] , _A : List[str] , _A : Optional[int]=(-1.0, 1.0) , _A : Tuple=False ) -> Optional[Any]: """simple docstring""" lowercase , lowercase : List[Any] = input_range lowercase : List[str] = torch.clip(_A , _A , _A ) if clip else outputs # Scale to [0, 1]. lowercase : List[Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def __a ( self : int , _A : Optional[Any] , _A : Optional[Any] , _A : Any ) -> int: """simple docstring""" lowercase : Union[str, Any] = input_tokens > 0 lowercase , lowercase : List[str] = self.notes_encoder( encoder_input_tokens=_A , encoder_inputs_mask=_A ) lowercase , lowercase : List[Any] = self.continuous_encoder( encoder_inputs=_A , encoder_inputs_mask=_A ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __a ( self : Union[str, Any] , _A : Optional[Any] , _A : Tuple , _A : Dict ) -> int: """simple docstring""" lowercase : List[Any] = noise_time if not torch.is_tensor(_A ): lowercase : str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0: lowercase : Union[str, Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase : List[Any] = self.decoder( encodings_and_masks=_A , decoder_input_tokens=_A , decoder_noise_time=_A ) return logits @torch.no_grad() def __call__( self : Optional[Any] , _A : List[List[int]] , _A : Optional[torch.Generator] = None , _A : int = 100 , _A : bool = True , _A : str = "numpy" , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: """simple docstring""" if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(_A )}.""" ) lowercase : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase : List[str] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase : List[str] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device ) for i, encoder_input_tokens in enumerate(_A ): if i == 0: lowercase : str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase : List[Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase : Any = ones lowercase : Optional[Any] = self.scale_features( _A , output_range=[-1.0, 1.0] , clip=_A ) lowercase : Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_A , continuous_mask=_A , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase : Any = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_A , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_A ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase : Any = self.decode( encodings_and_masks=_A , input_tokens=_A , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase : str = self.scheduler.step(_A , _A , _A , generator=_A ).prev_sample lowercase : List[Any] = self.scale_to_features(_A , input_range=[-1.0, 1.0] ) lowercase : int = mel[:1] lowercase : Dict = mel.cpu().float().numpy() lowercase : Tuple = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_A , _A ) logger.info('''Generated segment''' , _A ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowercase : str = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase : List[Any] = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_A )
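# Editor's check (hedged): scale_features/scale_to_features above are affine
# maps between [log(1e-5), 4.0] and an output range; a standalone round-trip
# on plain floats, mirroring the MelGAN-derived constants in __init__.
import math

_min_value, _max_value = math.log(1e-5), 4.0


def _scale(x, out=(-1.0, 1.0)):
    zero_one = (x - _min_value) / (_max_value - _min_value)
    return zero_one * (out[1] - out[0]) + out[0]


def _unscale(y, inp=(-1.0, 1.0)):
    zero_one = (y - inp[0]) / (inp[1] - inp[0])
    return zero_one * (_max_value - _min_value) + _min_value


assert abs(_unscale(_scale(1.2345)) - 1.2345) < 1e-9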
308
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
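# Editor's note (hedged sketch): with the _LazyModule wiring above, importing
# the package stays cheap; the heavy torch/vision submodules only load on the
# first attribute access.
import transformers.models.mask2former as mask2former  # nothing heavy imported yet

config_cls = mask2former.Mask2FormerConfig  # this access triggers the real import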
308
1
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' lowercase : List[Any] = os.path.abspath(__magic_name__ ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model lowercase : str = tf.train.list_variables(__magic_name__ ) lowercase : Any = [] lowercase : List[Any] = [] lowercase : Dict = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") lowercase : int = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' lowercase : Optional[int] = name[1:] # figure out how many levels deep the name is lowercase : Union[str, Any] = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(__magic_name__ ) # read data lowercase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ ) names.append('''/'''.join(__magic_name__ ) ) arrays.append(__magic_name__ ) logger.info(F"""Read a total of {len(__magic_name__ ):,} layers""" ) # Sanity check if len(set(__magic_name__ ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__magic_name__ ) )})""" ) lowercase : int = list(set(__magic_name__ ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(__magic_name__ , __magic_name__ ): lowercase : List[Any] = full_name.split('''/''' ) lowercase : Optional[int] = model lowercase : Optional[int] = [] for i, m_name in enumerate(__magic_name__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): lowercase : Union[str, Any] = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) lowercase : Dict = getattr(__magic_name__ , '''embeddings''' ) lowercase : Optional[Any] = getattr(__magic_name__ , '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) lowercase : Dict = getattr(__magic_name__ , '''encoder''' ) lowercase : Optional[Any] = getattr(__magic_name__ , '''layer''' ) lowercase : str = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) lowercase : List[str] = getattr(__magic_name__ , '''pooler''' ) lowercase : Tuple = getattr(__magic_name__ , '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) lowercase : str = getattr(__magic_name__ , '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) lowercase : Tuple = getattr(__magic_name__ , '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) lowercase : int = getattr(__magic_name__ , '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) lowercase : Optional[Any] = getattr(__magic_name__ , '''token_type_embeddings''' ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append('''weight''' ) lowercase : Union[str, Any] = getattr(__magic_name__ , '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) lowercase : Union[str, Any] = getattr(__magic_name__ , '''attention''' ) lowercase : str = getattr(__magic_name__ , '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) lowercase : str = getattr(__magic_name__ , '''attention''' ) lowercase : List[str] = getattr(__magic_name__ , '''output''' ) lowercase : Union[str, Any] = getattr(__magic_name__ , '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) lowercase : Tuple = getattr(__magic_name__ , '''attention''' ) lowercase : int = getattr(__magic_name__ , '''output''' ) lowercase : Union[str, Any] = getattr(__magic_name__ , '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) lowercase : Any = getattr(__magic_name__ , '''output''' ) lowercase : Optional[int] = getattr(__magic_name__ , '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) lowercase : List[str] = getattr(__magic_name__ , '''output''' ) lowercase : List[str] = getattr(__magic_name__ , '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) 
lowercase : List[str] = getattr(__magic_name__ , '''key''' ) elif m_name == "_query_dense": # attention query trace.append('''query''' ) lowercase : Any = getattr(__magic_name__ , '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) lowercase : Tuple = getattr(__magic_name__ , '''value''' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) lowercase : Tuple = getattr(__magic_name__ , '''intermediate''' ) lowercase : int = getattr(__magic_name__ , '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) lowercase : Dict = getattr(__magic_name__ , '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) lowercase : Dict = getattr(__magic_name__ , '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) lowercase : Union[str, Any] = getattr(__magic_name__ , '''weight''' ) else: logger.warning(F"""Ignored {m_name}""" ) # for certain layers reshape is necessary lowercase : int = '''.'''.join(__magic_name__ ) if re.match(r'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , __magic_name__ ) or re.match( r'''(\S+)\.attention\.output\.dense\.weight''' , __magic_name__ ): lowercase : Any = array.reshape(pointer.data.shape ) if "kernel" in full_name: lowercase : Tuple = array.transpose() if pointer.shape == array.shape: lowercase : str = torch.from_numpy(__magic_name__ ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> str: '''simple docstring''' logger.info(F"""Loading model based on config from {config_path}...""" ) lowercase : List[Any] = BertConfig.from_json_file(__magic_name__ ) lowercase : Dict = BertModel(__magic_name__ ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(__magic_name__ , __magic_name__ , __magic_name__ ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , __magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) lowerCAmelCase_ = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
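# Editor's usage sketch (hedged; the script name and all paths are
# placeholders) for the converter defined above, matching its argparse flags:
#
#   python convert_script.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file   /path/to/bert_config.json \
#       --pytorch_dump_path  /path/to/pytorch_model.bin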
308
def sum_of_digits(n: int) -> int:
    """Return the sum of the digits of ``n`` (sign is ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of ``sum_of_digits``."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner variant using ``str`` and ``sum``."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
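# Editor's sanity check (hedged): all three implementations above agree.
assert sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15
assert sum_of_digits(-123) == sum_of_digits_compact(-123) == 6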
308
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: lowerCAmelCase_ = None lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase_ = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } lowerCAmelCase_ = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } lowerCAmelCase_ = '▁' class _A ( _lowerCamelCase ): _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = BigBirdTokenizer _UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : List[int] = [] def __init__( self : Optional[Any] , _A : List[str]=None , _A : Optional[Any]=None , _A : int="<unk>" , _A : Dict="<s>" , _A : Optional[int]="</s>" , _A : Optional[Any]="<pad>" , _A : Tuple="[SEP]" , _A : Optional[int]="[MASK]" , _A : Union[str, Any]="[CLS]" , **_A : Dict , ) -> Union[str, Any]: """simple docstring""" lowercase : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token lowercase : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token lowercase : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token lowercase : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token lowercase : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token lowercase : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( _A , tokenizer_file=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , **_A , ) lowercase : Union[str, Any] = vocab_file lowercase : Optional[Any] = False if not self.vocab_file else True def __a ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase : List[str] = [self.sep_token_id] lowercase : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1] def __a ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase : List[Any] = [self.sep_token_id] lowercase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase : Optional[int] = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ): copyfile(self.vocab_file , _A ) return (out_vocab_file,)
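# Editor's sketch (hedged): the single-sequence layouts produced by the methods
# above -- the ids below are illustrative placeholders, not BigBird's real
# vocabulary ids.
_cls, _sep = 65, 66
_tokens = [10, 11, 12]
_with_special = [_cls] + _tokens + [_sep]        # build_inputs_with_special_tokens
_special_mask = [1] + [0] * len(_tokens) + [1]   # get_special_tokens_mask
_token_type_ids = [0] * len(_with_special)       # create_token_type_ids_from_sequences
assert len(_with_special) == len(_special_mask) == len(_token_type_ids)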
308
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
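# Editor's usage note (hedged): installed as accelerate's console entry point,
# the subcommands registered above are invoked as, e.g.:
#
#   accelerate config            # interactive configuration
#   accelerate env               # environment report
#   accelerate launch train.py   # launch a script with the configured setup
#   accelerate test              # sanity-check the current config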
308
1
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ = 'bert-base-cased' lowerCAmelCase_ = 'google/pegasus-xsum' lowerCAmelCase_ = [' Sam ate lunch today.', 'Sams lunch ingredients.'] lowerCAmelCase_ = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] lowerCAmelCase_ = 'patrickvonplaten/t5-tiny-random' lowerCAmelCase_ = 'sshleifer/bart-tiny-random' lowerCAmelCase_ = 'sshleifer/tiny-mbart' lowerCAmelCase_ = 'sshleifer/tiny-marian-en-de' def snake_case( __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' lowercase : Optional[int] = '''\n'''.join(__magic_name__ ) Path(__magic_name__ ).open('''w''' ).writelines(__magic_name__ ) def snake_case( __magic_name__ ) -> int: '''simple docstring''' for split in ["train", "val", "test"]: _dump_articles(os.path.join(__magic_name__ , F"""{split}.source""" ) , __magic_name__ ) _dump_articles(os.path.join(__magic_name__ , F"""{split}.target""" ) , __magic_name__ ) return tmp_dir class _A ( _lowerCamelCase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self : Tuple , _A : List[Any] ) -> Tuple: """simple docstring""" lowercase : List[Any] = AutoTokenizer.from_pretrained(_A ) lowercase : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowercase : int = max(len(tokenizer.encode(_A ) ) for a in ARTICLES ) lowercase : Any = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES ) lowercase : int = 4 lowercase : Optional[Any] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowercase , lowercase : Optional[int] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. lowercase : str = SeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , src_lang=_A , tgt_lang=_A , ) lowercase : List[str] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_A , _A ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowercase : str = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self : Union[str, Any] , _A : Union[str, Any] ) -> Any: """simple docstring""" lowercase : Optional[Any] = AutoTokenizer.from_pretrained(_A ) lowercase : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowercase : int = max(len(tokenizer.encode(_A ) ) for a in ARTICLES ) lowercase : Tuple = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES ) lowercase : List[Any] = 4 lowercase : Optional[int] = LegacySeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=20 , max_target_length=_A , ) lowercase : Dict = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self : Optional[Any] ) -> Any: """simple docstring""" lowercase : List[Any] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) lowercase : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowercase : List[str] = tmp_dir.joinpath('''train.source''' ).open().readlines() lowercase : Optional[int] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_A , _A , 128 , _A ) lowercase : Any = {x.name for x in tmp_dir.iterdir()} lowercase : int = {x.name for x in save_dir.iterdir()} lowercase : Optional[int] = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_A ) < len(_A ) assert len(_A ) == 1 assert len(packed_examples[0] ) == sum(len(_A ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" if not FAIRSEQ_AVAILABLE: return lowercase , lowercase , lowercase : List[str] = self._get_dataset(max_len=64 ) lowercase : Optional[int] = 64 lowercase : int = ds.make_dynamic_sampler(_A , required_batch_size_multiple=_A ) lowercase : str = [len(_A ) for x in batch_sampler] assert len(set(_A ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_A ) == len(_A ) # no dropped or added examples lowercase : Optional[int] = DataLoader(_A , batch_sampler=_A , collate_fn=ds.collate_fn , num_workers=2 ) lowercase : Any = [] lowercase : Dict = [] for batch in data_loader: lowercase : int = batch['''input_ids'''].shape lowercase : List[str] = src_shape[0] assert bs % 
required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowercase : int = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(_A ) if num_src_tokens > (max_tokens * 1.1): failures.append(_A ) assert num_src_per_batch[0] == max(_A ) if failures: raise AssertionError(f"""too many tokens in {len(_A )} batches""" ) def __a ( self : Optional[int] ) -> Any: """simple docstring""" lowercase , lowercase , lowercase : List[str] = self._get_dataset(max_len=512 ) lowercase : Optional[Any] = 2 lowercase : Optional[int] = ds.make_sortish_sampler(_A , shuffle=_A ) lowercase : Union[str, Any] = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 ) lowercase : List[str] = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 , sampler=_A ) lowercase : Union[str, Any] = tokenizer.pad_token_id def count_pad_tokens(_A : int , _A : List[str]="input_ids" ): return [batch[k].eq(_A ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_A , k='''labels''' ) ) < sum(count_pad_tokens(_A , k='''labels''' ) ) assert sum(count_pad_tokens(_A ) ) < sum(count_pad_tokens(_A ) ) assert len(_A ) == len(_A ) def __a ( self : Optional[Any] , _A : List[Any]=1_000 , _A : Optional[Any]=128 ) -> Tuple: """simple docstring""" if os.getenv('''USE_REAL_DATA''' , _A ): lowercase : Optional[int] = '''examples/seq2seq/wmt_en_ro''' lowercase : Optional[int] = max_len * 2 * 64 if not Path(_A ).joinpath('''train.len''' ).exists(): save_len_file(_A , _A ) else: lowercase : Tuple = '''examples/seq2seq/test_data/wmt_en_ro''' lowercase : Tuple = max_len * 4 save_len_file(_A , _A ) lowercase : Optional[int] = AutoTokenizer.from_pretrained(_A ) lowercase : List[Any] = SeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , n_obs=_A , ) return ds, max_tokens, tokenizer def __a ( self : Optional[int] ) -> Any: """simple docstring""" lowercase , lowercase , lowercase : List[Any] = self._get_dataset() lowercase : List[str] = set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=0 , add_extra_examples=_A ) ) lowercase : Optional[int] = set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=1 , add_extra_examples=_A ) ) assert idsa.intersection(_A ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self : Optional[int] , _A : int ) -> Optional[int]: """simple docstring""" lowercase : Tuple = AutoTokenizer.from_pretrained(_A , use_fast=_A ) if tok_name == MBART_TINY: lowercase : Union[str, Any] = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) lowercase : Union[str, Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowercase : Optional[int] = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) lowercase : Dict = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_A ) == 1 if tok_name == BART_TINY else len(_A ) == 0
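# --- Hedged example: the truncation property the first test asserts ---
# A minimal, self-contained sketch, assuming only the public tokenizer API:
# with truncation=True and padding="max_length", every encoded source is
# capped at max_source_length, so the batch's sequence dimension equals the
# cap exactly. The checkpoint and sentences below are illustrative, not taken
# from the test fixtures above.
from transformers import AutoTokenizer

def encode_with_cap(texts, tokenizer, max_source_length=4):
    # padding="max_length" pads short inputs up; truncation=True trims long ones down
    return tokenizer(
        texts,
        max_length=max_source_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )

tok = AutoTokenizer.from_pretrained("sshleifer/bart-tiny-random")
batch = encode_with_cap([" Sam ate lunch today.", "Sams lunch ingredients."], tok)
assert batch["input_ids"].shape[1] == 4  # articles trimmed to max_source_length
assert batch["attention_mask"].shape == batch["input_ids"].shape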
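# --- Hedged example: what shift_tokens_right does with mBART labels ---
# The mBART language-code assertions above rely on shift_tokens_right wrapping
# the last non-pad token of each row (the target language code) around to
# position 0, with everything else shifted right by one. This is a
# re-implementation sketch of that behavior, not the function imported above;
# the token ids are made up.
import torch

def shift_right_sketch(input_ids, pad_token_id):
    shifted = input_ids.clone()
    # index of the last non-pad token in each row (the language code)
    last_nonpad = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    start_tokens = input_ids.gather(1, last_nonpad).squeeze(-1)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = start_tokens
    return shifted

labels = torch.tensor([[5, 6, 2, 250004], [7, 2, 250004, 1]])  # 1 = pad, 250004 = a lang code
out = shift_right_sketch(labels, pad_token_id=1)
assert out[0, 0].item() == 250004 and out[1, 0].item() == 250004  # decoder starts with lang code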
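# --- Hedged example: why sortish sampling cuts pad tokens ---
# The pad-count comparison in the sortish-sampler test holds because batching
# similar-length examples together shrinks per-batch padding. Below is a
# dependency-free sketch of the idea (shuffle, then sort within coarse
# chunks); it is not the actual make_sortish_sampler implementation, and the
# chunk factor of 50 is an assumption for illustration.
import random

def sortish_indices(lengths, batch_size, chunks_per_sort=50):
    idx = list(range(len(lengths)))
    random.shuffle(idx)  # keep some randomness across epochs
    chunk = batch_size * chunks_per_sort
    out = []
    for i in range(0, len(idx), chunk):
        # sort inside each chunk so consecutive batches are length-homogeneous
        out.extend(sorted(idx[i : i + chunk], key=lambda j: lengths[j], reverse=True))
    return out

def pad_cost(order, lengths, bs):
    # pad tokens wasted if each batch is padded to its longest member
    return sum(
        max(lengths[j] for j in order[i : i + bs]) * len(order[i : i + bs])
        - sum(lengths[j] for j in order[i : i + bs])
        for i in range(0, len(order), bs)
    )

random.seed(0)
lengths = [random.randint(1, 100) for _ in range(1_000)]
sortish = sortish_indices(lengths, batch_size=8)
# expect the sortish order to waste far fewer pad tokens than arrival order
print(pad_cost(sortish, lengths, 8), "<", pad_cost(list(range(len(lengths))), lengths, 8))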
308
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]: '''simple docstring''' lowercase : List[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowercase : Optional[int] = '''''' else: lowercase : List[Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : Tuple = in_proj_weight[ : config.hidden_size, : ] lowercase : str = in_proj_bias[: config.hidden_size] lowercase : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : Dict = in_proj_bias[ config.hidden_size : 
config.hidden_size * 2 ] lowercase : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase : Optional[int] = in_proj_bias[-config.hidden_size :] def snake_case( __magic_name__ ) -> int: '''simple docstring''' lowercase : str = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ ) -> Tuple: '''simple docstring''' lowercase : Any = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' lowercase : List[Any] = dct.pop(__magic_name__ ) lowercase : Union[str, Any] = val def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : Optional[Any] = ViTMSNConfig() lowercase : str = 10_00 lowercase : List[str] = '''datasets/huggingface/label-files''' lowercase : List[str] = '''imagenet-1k-id2label.json''' lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) ) lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Any = idalabel lowercase : List[Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowercase : int = 3_84 lowercase : Optional[Any] = 15_36 lowercase : Tuple = 6 elif "l16" in checkpoint_url: lowercase : Union[str, Any] = 10_24 lowercase : List[str] = 40_96 lowercase : int = 24 lowercase : Union[str, Any] = 16 lowercase : Tuple = 0.1 elif "b4" in checkpoint_url: lowercase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowercase : Dict = 7 lowercase : List[Any] = 10_24 lowercase : str = 40_96 lowercase : int = 24 lowercase : Dict = 16 lowercase : Tuple = 0.1 lowercase : int = ViTMSNModel(__magic_name__ ) lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder'''] lowercase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(__magic_name__ ) lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ ) for src, dest in rename_keys: rename_key(__magic_name__ , __magic_name__ , __magic_name__ ) read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) lowercase : Dict = ViTImageProcessor( size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ ) lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) lowercase : int = model(**__magic_name__ ) lowercase : Optional[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowercase : List[str] 
= torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__magic_name__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
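# --- Hedged example: the fused-qkv split performed by read_in_q_k_v ---
# Timm-style ViT checkpoints store attention as one fused qkv projection of
# shape (3 * hidden_size, hidden_size), while the HF model expects separate
# query/key/value tensors. A minimal sketch with random tensors; the hidden
# size here is illustrative only.
import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)
in_proj_bias = torch.randn(3 * hidden)

q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : hidden * 2, :]
v_w = in_proj_weight[-hidden:, :]
q_b, k_b, v_b = in_proj_bias[:hidden], in_proj_bias[hidden : hidden * 2], in_proj_bias[-hidden:]

# the three slices tile the fused parameters exactly, in q/k/v order
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)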
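# --- Hedged example: how the rename table is applied ---
# rename_key boils down to popping a tensor under its old name and reinserting
# it under the new one; iterating the (src, dest) pairs is safe as long as the
# old and new namespaces never collide. A toy state dict stands in for real
# checkpoint weights:
toy_state_dict = {"module.norm.weight": 1.0, "module.norm.bias": 0.0}
toy_renames = [
    ("module.norm.weight", "layernorm.weight"),
    ("module.norm.bias", "layernorm.bias"),
]
for src, dest in toy_renames:
    toy_state_dict[dest] = toy_state_dict.pop(src)
assert set(toy_state_dict) == {"layernorm.weight", "layernorm.bias"}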
308
1