Dataset schema (one record per row below):

    column                    dtype     range
    code                      string    86 to 54.5k chars
    code_codestyle            int64     0 to 371
    style_context             string    87 to 49.2k chars
    style_context_codestyle   int64     0 to 349
    label                     int64     0 to 1
Row 1

code:

def SCREAMING_SNAKE_CASE__(lowerCamelCase__, lowerCamelCase__) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    __lowerCamelCase : int = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__)
    )
    return round(lowerCamelCase__, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

code_codestyle: 73
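The obfuscation has detached the parameter names from the body above (the function receives `lowerCamelCase__` twice but reads `discount_rate` and `cash_flows`), so the row is not runnable as stored. A minimal deobfuscated sketch of the intended net-present-value routine, with names of my own choosing and the rounding applied to the computed sum:

```python
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount each cash flow by (1 + r)^i and sum; period 0 is undiscounted."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(cf / (1 + discount_rate) ** i for i, cf in enumerate(cash_flows))
    return round(npv, ndigits=2)


print(net_present_value(0.1, [-1000.0, 500.0, 500.0, 500.0]))  # 243.43
```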
style_context:

'''simple docstring'''
import datasets

from .evaluate import evaluate


SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'

SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'

SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a_(datasets.Metric):
    def A__(self) -> Tuple:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": {
                        """id""": datasets.Value("""string"""),
                        """prediction_text""": datasets.features.Sequence(datasets.Value("""string""")),
                    },
                    """references""": {
                        """id""": datasets.Value("""string"""),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string"""),
                                """answer_start""": datasets.Value("""int32"""),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["""https://www.atticusprojectai.org/cuad"""],
            reference_urls=["""https://www.atticusprojectai.org/cuad"""],
        )

    def A__(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        UpperCamelCase = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE, predictions=_SCREAMING_SNAKE_CASE)
        return score

style_context_codestyle: 321
label: 0
Row 2

code:

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


_A : List[str] = logging.get_logger(__name__)

_A : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

_A : List[Any] = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

_A : Optional[int] = {
    'yjernite/retribert-base-uncased': 5_12,
}

_A : Optional[int] = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}


class __SCREAMING_SNAKE_CASE(a__):
    _UpperCAmelCase : int = VOCAB_FILES_NAMES
    _UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
    _UpperCAmelCase : Optional[int] = RetriBertTokenizer
    _UpperCAmelCase : List[str] = ["input_ids", "attention_mask"]

    def __init__(
        self : Any,
        A : Optional[int] = None,
        A : Dict = None,
        A : Optional[Any] = True,
        A : Optional[Any] = "[UNK]",
        A : str = "[SEP]",
        A : Tuple = "[PAD]",
        A : int = "[CLS]",
        A : Optional[Any] = "[MASK]",
        A : Dict = True,
        A : Dict = None,
        **A : Union[str, Any],
    ) -> int:
        super().__init__(
            A,
            tokenizer_file=A,
            do_lower_case=A,
            unk_token=A,
            sep_token=A,
            pad_token=A,
            cls_token=A,
            mask_token=A,
            tokenize_chinese_chars=A,
            strip_accents=A,
            **A,
        )
        lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', A) != do_lower_case
            or normalizer_state.get('''strip_accents''', A) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', A) != tokenize_chinese_chars
        ):
            lowerCamelCase__ = getattr(A, normalizer_state.pop('''type'''))
            lowerCamelCase__ = do_lower_case
            lowerCamelCase__ = strip_accents
            lowerCamelCase__ = tokenize_chinese_chars
            lowerCamelCase__ = normalizer_class(**A)
        lowerCamelCase__ = do_lower_case

    def __lowerCamelCase(self : Union[str, Any], A : str, A : Optional[int] = None) -> List[str]:
        lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __lowerCamelCase(self : int, A : List[int], A : Optional[List[int]] = None) -> List[str]:
        lowerCamelCase__ = [self.sep_token_id]
        lowerCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def __lowerCamelCase(self : Optional[int], A : str, A : Optional[str] = None) -> Tuple:
        lowerCamelCase__ = self._tokenizer.model.save(A, name=A)
        return tuple(A)

code_codestyle: 370
style_context:

def _a(UpperCAmelCase, UpperCAmelCase) -> Dict:
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(UpperCAmelCase, int(b / 2)) * actual_power(UpperCAmelCase, int(b / 2))
    else:
        return a * actual_power(UpperCAmelCase, int(b / 2)) * actual_power(UpperCAmelCase, int(b / 2))


def _a(UpperCAmelCase, UpperCAmelCase) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(UpperCAmelCase, UpperCAmelCase)
    return actual_power(UpperCAmelCase, UpperCAmelCase)


if __name__ == "__main__":
    print(power(-2, -3))

style_context_codestyle: 265
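Both functions above were renamed `_a`, while their bodies still call `actual_power` and `power`, so the row cannot run as stored. A deobfuscated sketch of the intended recursive exponentiation (the `-b` in the negative branch is my reconstruction; the obfuscated arguments do not show the sign flip):

```python
def actual_power(a: int, b: int) -> int:
    """Recursive exponentiation: halves the exponent each call (O(log b) depth)."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Handles negative exponents via the identity a**(-b) == 1 / a**b."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


print(power(-2, -3))  # -0.125, since (-2)**3 == -8
```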
label: 0
Row 3

code:

import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


_a = logging.getLogger()


@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class A_(unittest.TestCase):
    def UpperCAmelCase(
        self : str,
        UpperCAmelCase : Path,
        UpperCAmelCase : Union[str, None] = None,
        UpperCAmelCase : Union[List[str], None] = None,
        UpperCAmelCase : Union[str, List[str], None] = None,
        UpperCAmelCase : bool = True,
    ) -> int:
        __lowerCAmelCase: str = [
            file for file in os.listdir(UpperCAmelCase) if os.path.isfile(os.path.join(UpperCAmelCase, UpperCAmelCase))
        ]
        if identifier is not None:
            __lowerCAmelCase: int = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(UpperCAmelCase, UpperCAmelCase):
                for n_ in n_identifier:
                    __lowerCAmelCase: Any = [file for file in files if n_ not in file]
            else:
                __lowerCAmelCase: Optional[int] = [file for file in files if n_identifier not in file]
        __lowerCAmelCase: Tuple = ignore_files or []
        ignore_files.append('__init__.py')
        __lowerCAmelCase: Optional[Any] = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', UpperCAmelCase)
            if only_modules:
                __lowerCAmelCase: Any = file.split('.')[0]
                try:
                    __lowerCAmelCase: int = getattr(UpperCAmelCase, UpperCAmelCase)
                    __lowerCAmelCase: str = doctest.DocTestSuite(UpperCAmelCase)
                    __lowerCAmelCase: Dict = unittest.TextTestRunner().run(UpperCAmelCase)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''')
            else:
                __lowerCAmelCase: Dict = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def UpperCAmelCase(self : List[str]) -> Any:
        __lowerCAmelCase: Optional[Any] = Path('src/transformers')
        __lowerCAmelCase: Dict = 'modeling'
        __lowerCAmelCase: str = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(UpperCAmelCase, identifier=UpperCAmelCase, ignore_files=UpperCAmelCase)

    def UpperCAmelCase(self : Dict) -> int:
        __lowerCAmelCase: List[str] = Path('src/transformers')
        __lowerCAmelCase: Any = 'tokenization'
        self.analyze_directory(UpperCAmelCase, identifier=UpperCAmelCase)

    def UpperCAmelCase(self : Any) -> str:
        __lowerCAmelCase: Optional[int] = Path('src/transformers')
        __lowerCAmelCase: int = 'configuration'
        self.analyze_directory(UpperCAmelCase, identifier=UpperCAmelCase)

    def UpperCAmelCase(self : Optional[int]) -> Tuple:
        __lowerCAmelCase: int = Path('src/transformers')
        __lowerCAmelCase: str = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(UpperCAmelCase, n_identifier=UpperCAmelCase)

    def UpperCAmelCase(self : Optional[int]) -> List[str]:
        __lowerCAmelCase: str = Path('docs/source')
        __lowerCAmelCase: Dict = ['favicon.ico']
        self.analyze_directory(UpperCAmelCase, ignore_files=UpperCAmelCase, only_modules=UpperCAmelCase)

code_codestyle: 322
style_context:

import functools
from typing import Any


def __snake_case(__UpperCamelCase : str, __UpperCamelCase : list[str]):
    """simple docstring"""
    if not isinstance(__UpperCamelCase, __UpperCamelCase) or len(__UpperCamelCase) == 0:
        raise ValueError("the string should be not empty string")
    if not isinstance(__UpperCamelCase, __UpperCamelCase) or not all(
        isinstance(__UpperCamelCase, __UpperCamelCase) and len(__UpperCamelCase) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    A_ = {}
    A_ = "WORD_KEEPER"
    for word in words:
        A_ = trie
        for c in word:
            if c not in trie_node:
                A_ = {}
            A_ = trie_node[c]
        A_ = True

    A_ = len(__UpperCamelCase)

    # Dynamic programming method
    @functools.cache
    def is_breakable(__UpperCamelCase : int) -> bool:
        if index == len_string:
            return True
        A_ = trie
        for i in range(__UpperCamelCase, __UpperCamelCase):
            A_ = trie_node.get(string[i], __UpperCamelCase)
            if trie_node is None:
                return False
            if trie_node.get(__UpperCamelCase, __UpperCamelCase) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

style_context_codestyle: 312
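The trie-plus-memoisation structure survives the renaming above, but every assignment target became `A_`, so the data flow is only recoverable from the right-hand sides. A deobfuscated sketch of the word-break check (names mine):

```python
import functools


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into words from `words`."""
    # Build a trie; WORD_KEEPER marks nodes that terminate a word.
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        node = trie
        for c in word:
            node = node.setdefault(c, {})
        node[word_keeper_key] = True

    len_string = len(string)

    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        node = trie
        for i in range(index, len_string):
            node = node.get(string[i], None)
            if node is None:
                return False
            if node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


print(word_break("applepenapple", ["apple", "pen"]))  # True
```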
label: 0
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": __A = argparse.ArgumentParser( description=( '''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned''' ''' Distillation''' ) ) parser.add_argument('''--model_type''', default='''bert''', choices=['''bert''']) parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str) parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str) parser.add_argument('''--vocab_transform''', action='''store_true''') __A = parser.parse_args() if args.model_type == "bert": __A = BertForMaskedLM.from_pretrained(args.model_name) __A = '''bert''' else: raise ValueError('''args.model_type should be "bert".''') __A = model.state_dict() __A = {} for w in ["word_embeddings", "position_embeddings"]: __A = state_dict[F"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: __A = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""] __A = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] __A = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 __A = state_dict['''cls.predictions.decoder.weight'''] __A = state_dict['''cls.predictions.bias'''] if args.vocab_transform: for w in ["weight", "bias"]: __A = state_dict[F"""cls.predictions.transform.dense.{w}"""] __A = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""] print(F"""N layers selected for distillation: {std_idx}""") print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
350
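Every left-hand side above was collapsed to `__A`, which hides the whole point of the script: each teacher parameter is copied into `compressed_sd` under a renumbered student key. A minimal standalone sketch of that renaming step (function and variable names are mine; the teacher layer list [0, 2, 4, 7, 9, 11] is from the row itself, and a string-replace stands in for the script's per-submodule assignments):

```python
import torch


def compress_state_dict(state_dict: dict, teacher_layers=(0, 2, 4, 7, 9, 11)) -> dict:
    """Copy the selected teacher layers into a student state dict,
    renumbering e.g. 'encoder.layer.7.' -> 'encoder.layer.3.'."""
    compressed_sd = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        src = f"encoder.layer.{teacher_idx}."
        dst = f"encoder.layer.{std_idx}."
        for key, tensor in state_dict.items():
            if src in key:
                compressed_sd[key.replace(src, dst)] = tensor
    return compressed_sd


# Tiny usage example with a fake two-key state dict:
fake = {
    "bert.encoder.layer.7.output.dense.weight": torch.zeros(2, 2),
    "bert.encoder.layer.1.output.dense.weight": torch.ones(2, 2),
}
print(compress_state_dict(fake).keys())  # layer.7 -> layer.3; layer.1 is dropped
```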
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''', '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''', '''uclanlp/visualbert-vqa-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''', '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''', '''uclanlp/visualbert-vcr-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json''' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class _snake_case ( a__ ): snake_case__ = "visual_bert" def __init__( self : int , UpperCAmelCase : Any=30522 , UpperCAmelCase : Tuple=768 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : Any=3072 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : Dict=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Dict=1E-12 , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : List[str]=2 , **UpperCAmelCase : str , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : str = max_position_embeddings __lowerCamelCase : str = hidden_size __lowerCamelCase : Union[str, Any] = visual_embedding_dim __lowerCamelCase : Any = num_hidden_layers __lowerCamelCase : Union[str, Any] = num_attention_heads __lowerCamelCase : Optional[Any] = intermediate_size __lowerCamelCase : List[Any] = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : str = attention_probs_dropout_prob __lowerCamelCase : List[Any] = initializer_range __lowerCamelCase : List[str] = type_vocab_size __lowerCamelCase : str = layer_norm_eps __lowerCamelCase : List[str] = bypass_transformer __lowerCamelCase : Optional[int] = special_visual_initialize
64
label: 0
Row 5

code:

import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaForCTC,
    WavaVecaProcessor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    UpperCamelCase__ = True
    from torch.cuda.amp import autocast

UpperCamelCase__ = logging.getLogger(__name__)


def _a(SCREAMING_SNAKE_CASE_ : List[Any] = None, SCREAMING_SNAKE_CASE_ : List[str] = None):
    return field(default_factory=lambda: default, metadata=SCREAMING_SNAKE_CASE_)


@dataclass
class a__:
    _a : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}
    )
    _a : Optional[str] = field(
        default=snake_case__,
        metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""},
    )
    _a : Optional[bool] = field(
        default=snake_case__, metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}
    )
    _a : Optional[float] = field(
        default=0.1, metadata={"""help""": """The dropout ratio for the attention probabilities."""}
    )
    _a : Optional[float] = field(
        default=0.1, metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""}
    )
    _a : Optional[float] = field(
        default=0.1,
        metadata={
            """help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
        },
    )
    _a : Optional[float] = field(
        default=0.1,
        metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""},
    )
    _a : Optional[float] = field(
        default=0.05,
        metadata={
            """help""": (
                """Propability of each feature vector along the time axis to be chosen as the start of the vector"""
                """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
                """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
            )
        },
    )
    _a : Optional[float] = field(default=0.0, metadata={"""help""": """The LayerDrop probability."""})


@dataclass
class a__:
    _a : Optional[str] = field(
        default=snake_case__,
        metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""},
    )
    _a : Optional[str] = field(
        default="""train+validation""",
        metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        },
    )
    _a : bool = field(
        default=snake_case__, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}
    )
    _a : Optional[int] = field(
        default=snake_case__,
        metadata={"""help""": """The number of processes to use for the preprocessing."""},
    )
    _a : Optional[int] = field(
        default=snake_case__,
        metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        },
    )
    _a : Optional[int] = field(
        default=snake_case__,
        metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of validation examples to this """
                """value if set."""
            )
        },
    )
    _a : List[str] = list_field(
        default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""],
        metadata={"""help""": """A list of characters to remove from the transcripts."""},
    )


@dataclass
class a__:
    _a : WavaVecaProcessor
    _a : Union[bool, str] = True
    _a : Optional[int] = None
    _a : Optional[int] = None
    _a : Optional[int] = None
    _a : Optional[int] = None

    def __call__(self, _A):
        """simple docstring"""
        __lowerCAmelCase = [{"input_values": feature["input_values"]} for feature in features]
        __lowerCAmelCase = [{"input_ids": feature["labels"]} for feature in features]
        __lowerCAmelCase = self.processor.pad(
            _A,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        __lowerCAmelCase = self.processor.pad(
            labels=_A,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        __lowerCAmelCase = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -1_0_0)
        __lowerCAmelCase = labels
        return batch


class a__(snake_case__):
    def __SCREAMING_SNAKE_CASE(self, _A, _A):
        """simple docstring"""
        model.train()
        __lowerCAmelCase = self._prepare_inputs(_A)
        if self.use_amp:
            with autocast():
                __lowerCAmelCase = self.compute_loss(_A, _A)
        else:
            __lowerCAmelCase = self.compute_loss(_A, _A)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                __lowerCAmelCase = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                __lowerCAmelCase = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")
        if self.args.gradient_accumulation_steps > 1:
            __lowerCAmelCase = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(_A).backward()
        elif self.use_apex:
            with amp.scale_loss(_A, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(_A)
        else:
            loss.backward()
        return loss.detach()


def _a():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    __lowerCAmelCase = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        __lowerCAmelCase = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}"""
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", SCREAMING_SNAKE_CASE_)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    __lowerCAmelCase = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    __lowerCAmelCase = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    __lowerCAmelCase = f"""[{''.join(data_args.chars_to_ignore)}]"""

    def remove_special_characters(SCREAMING_SNAKE_CASE_ : int):
        __lowerCAmelCase = re.sub(SCREAMING_SNAKE_CASE_, "", batch["sentence"]).lower() + " "
        return batch

    __lowerCAmelCase = train_dataset.map(SCREAMING_SNAKE_CASE_, remove_columns=["sentence"])
    __lowerCAmelCase = eval_dataset.map(SCREAMING_SNAKE_CASE_, remove_columns=["sentence"])

    def extract_all_chars(SCREAMING_SNAKE_CASE_ : Tuple):
        __lowerCAmelCase = " ".join(batch["text"])
        __lowerCAmelCase = list(set(SCREAMING_SNAKE_CASE_))
        return {"vocab": [vocab], "all_text": [all_text]}

    __lowerCAmelCase = train_dataset.map(
        SCREAMING_SNAKE_CASE_,
        batched=SCREAMING_SNAKE_CASE_,
        batch_size=-1,
        keep_in_memory=SCREAMING_SNAKE_CASE_,
        remove_columns=train_dataset.column_names,
    )
    __lowerCAmelCase = train_dataset.map(
        SCREAMING_SNAKE_CASE_,
        batched=SCREAMING_SNAKE_CASE_,
        batch_size=-1,
        keep_in_memory=SCREAMING_SNAKE_CASE_,
        remove_columns=eval_dataset.column_names,
    )
    __lowerCAmelCase = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    __lowerCAmelCase = {v: k for k, v in enumerate(SCREAMING_SNAKE_CASE_)}
    __lowerCAmelCase = vocab_dict[" "]
    del vocab_dict[" "]
    __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_)
    __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_)
    with open("vocab.json", "w") as vocab_file:
        json.dump(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __lowerCAmelCase = WavaVecaCTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    __lowerCAmelCase = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=1_60_00,
        padding_value=0.0,
        do_normalize=SCREAMING_SNAKE_CASE_,
        return_attention_mask=SCREAMING_SNAKE_CASE_,
    )
    __lowerCAmelCase = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_)
    __lowerCAmelCase = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        __lowerCAmelCase = min(len(SCREAMING_SNAKE_CASE_), data_args.max_train_samples)
        __lowerCAmelCase = train_dataset.select(range(SCREAMING_SNAKE_CASE_))
    if data_args.max_val_samples is not None:
        __lowerCAmelCase = eval_dataset.select(range(data_args.max_val_samples))

    __lowerCAmelCase = torchaudio.transforms.Resample(4_80_00, 1_60_00)

    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(SCREAMING_SNAKE_CASE_ : str):
        __lowerCAmelCase, __lowerCAmelCase = torchaudio.load(batch["path"])
        __lowerCAmelCase = resampler(SCREAMING_SNAKE_CASE_).squeeze().numpy()
        __lowerCAmelCase = 1_60_00
        __lowerCAmelCase = batch["text"]
        return batch

    __lowerCAmelCase = train_dataset.map(
        SCREAMING_SNAKE_CASE_, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    __lowerCAmelCase = eval_dataset.map(
        SCREAMING_SNAKE_CASE_, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )

    def prepare_dataset(SCREAMING_SNAKE_CASE_ : Tuple):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
        __lowerCAmelCase = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(SCREAMING_SNAKE_CASE_)
        return batch

    __lowerCAmelCase = train_dataset.map(
        SCREAMING_SNAKE_CASE_,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=SCREAMING_SNAKE_CASE_,
        num_proc=data_args.preprocessing_num_workers,
    )
    __lowerCAmelCase = eval_dataset.map(
        SCREAMING_SNAKE_CASE_,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=SCREAMING_SNAKE_CASE_,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    __lowerCAmelCase = datasets.load_metric("wer")

    def compute_metrics(SCREAMING_SNAKE_CASE_ : Tuple):
        __lowerCAmelCase = pred.predictions
        __lowerCAmelCase = np.argmax(SCREAMING_SNAKE_CASE_, axis=-1)
        __lowerCAmelCase = processor.tokenizer.pad_token_id
        __lowerCAmelCase = processor.batch_decode(SCREAMING_SNAKE_CASE_)
        # we do not want to group tokens when computing the metrics
        __lowerCAmelCase = processor.batch_decode(pred.label_ids, group_tokens=SCREAMING_SNAKE_CASE_)
        __lowerCAmelCase = wer_metric.compute(predictions=SCREAMING_SNAKE_CASE_, references=SCREAMING_SNAKE_CASE_)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    __lowerCAmelCase = DataCollatorCTCWithPadding(processor=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_)

    # Initialize our Trainer
    __lowerCAmelCase = CTCTrainer(
        model=SCREAMING_SNAKE_CASE_,
        data_collator=SCREAMING_SNAKE_CASE_,
        args=SCREAMING_SNAKE_CASE_,
        compute_metrics=SCREAMING_SNAKE_CASE_,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            __lowerCAmelCase = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            __lowerCAmelCase = model_args.model_name_or_path
        else:
            __lowerCAmelCase = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        __lowerCAmelCase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_)
        trainer.save_model()
        __lowerCAmelCase = train_result.metrics
        __lowerCAmelCase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_)
        )
        __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_, len(SCREAMING_SNAKE_CASE_))
        trainer.log_metrics("train", SCREAMING_SNAKE_CASE_)
        trainer.save_metrics("train", SCREAMING_SNAKE_CASE_)
        trainer.save_state()

    # Evaluation
    __lowerCAmelCase = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        __lowerCAmelCase = trainer.evaluate()
        __lowerCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(SCREAMING_SNAKE_CASE_)
        __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_, len(SCREAMING_SNAKE_CASE_))
        trainer.log_metrics("eval", SCREAMING_SNAKE_CASE_)
        trainer.save_metrics("eval", SCREAMING_SNAKE_CASE_)

    return results


if __name__ == "__main__":
    main()

code_codestyle: 92
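One detail worth calling out from the collator in this row: label padding is replaced with -100 so the loss computation skips those positions (the ignore_index convention in PyTorch losses). A minimal standalone sketch of that masking step (tensor values are made up):

```python
import torch

# attention_mask marks real label tokens (1) vs padding (0);
# masked_fill writes -100 wherever the mask is not 1.
input_ids = torch.tensor([[5, 9, 2, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
labels = input_ids.masked_fill(attention_mask.ne(1), -100)
print(labels)  # tensor([[   5,    9,    2, -100, -100]])
```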
style_context:

'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


lowercase : Tuple = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : int = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Tuple = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

style_context_codestyle: 42
label: 0
Row 6

code:

import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


_snake_case = HUGGINGFACE_HUB_CACHE
_snake_case = 'config.json'
_snake_case = 'diffusion_pytorch_model.bin'
_snake_case = 'diffusion_flax_model.msgpack'
_snake_case = 'model.onnx'
_snake_case = 'diffusion_pytorch_model.safetensors'
_snake_case = 'weights.pb'
_snake_case = 'https://huggingface.co'
_snake_case = default_cache_path
_snake_case = 'diffusers_modules'
_snake_case = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
_snake_case = ['fp16', 'non-ema']
_snake_case = '.self_attn'

code_codestyle: 353
style_context:

from __future__ import annotations

from math import pi


def _UpperCamelCase(snake_case__, snake_case__, snake_case__) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()

style_context_codestyle: 342
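This row implements the inductive-reactance relation X_L = 2πfL, solving for whichever of the three quantities is passed as 0. A quick numerical check of the relation and its inversion (values are mine):

```python
from math import pi

# X_L = 2 * pi * f * L; with f = 50 Hz and L = 0.1 H:
reactance = 2 * pi * 50 * 0.1
print(round(reactance, 2))  # 31.42 ohms
# Inverting for L given X_L and f recovers the original inductance:
print(round(reactance / (2 * pi * 50), 3))  # 0.1 H
```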
label: 0
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _UpperCamelCase: List[str] = abspath(join(dirname(dirname(__file__)), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def lowercase__ ( _UpperCAmelCase ) -> List[Any]: '''simple docstring''' from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def lowercase__ ( _UpperCAmelCase ) -> Dict: '''simple docstring''' from diffusers.utils.testing_utils import pytest_terminal_summary_main lowercase : List[Any] = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
255
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase: List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase: Union[str, Any] = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase: Optional[int] = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys _UpperCamelCase: Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
255
label: 1
Row 8

code:

import math
import sys


def lowerCAmelCase_(__lowerCAmelCase) -> int:
    '''simple docstring'''
    if number != int(__lowerCAmelCase):
        raise ValueError('''the value of input must be a natural number''')
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''')
    if number == 0:
        return 1
    UpperCAmelCase : Optional[int] = [-1] * (number + 1)
    UpperCAmelCase : Dict = 0
    for i in range(1, number + 1):
        UpperCAmelCase : List[str] = sys.maxsize
        UpperCAmelCase : int = int(math.sqrt(__lowerCAmelCase))
        for j in range(1, root + 1):
            UpperCAmelCase : str = 1 + answers[i - (j**2)]
            UpperCAmelCase : int = min(__lowerCAmelCase, __lowerCAmelCase)
        UpperCAmelCase : List[Any] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

code_codestyle: 78
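This row is the classic "minimum number of perfect squares summing to n" dynamic program (answers[i] = 1 + min over j of answers[i - j²]); the renaming hides which variable each assignment feeds. A deobfuscated sketch (names mine; it keeps the row's quirk of returning 1 for input 0):

```python
import math
import sys


def num_squares(number: int) -> int:
    """Fewest perfect squares that sum to `number` (Lagrange guarantees at most 4)."""
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            answer = min(answer, 1 + answers[i - j**2])
        answers[i] = answer
    return answers[number]


print(num_squares(12))  # 3, since 12 = 4 + 4 + 4
```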
style_context:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__snake_case = {
    '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case = [
        '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Swinv2ForImageClassification''',
        '''Swinv2ForMaskedImageModeling''',
        '''Swinv2Model''',
        '''Swinv2PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )

else:
    import sys

    __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)

style_context_codestyle: 78
label: 1
Row 9

code:

'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

__snake_case = logging.getLogger(__name__)


@dataclass
class UpperCAmelCase_:
    lowerCamelCase : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}
    )
    lowerCamelCase : Optional[str] = field(
        default=__lowercase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}
    )
    lowerCamelCase : Optional[str] = field(
        default=__lowercase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}
    )
    lowerCamelCase : Optional[str] = field(
        default=__lowercase,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''},
    )
    lowerCamelCase : bool = field(
        default=__lowercase,
        metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''},
    )
    lowerCamelCase : str = field(
        default='''main''',
        metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''},
    )
    lowerCamelCase : bool = field(
        default=__lowercase,
        metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        },
    )


@dataclass
class UpperCAmelCase_:
    lowerCamelCase : Optional[str] = field(default=__lowercase, metadata={'''help''': '''The input training data file (a text file).'''})
    lowerCamelCase : Optional[str] = field(
        default=__lowercase,
        metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''},
    )
    lowerCamelCase : bool = field(
        default=__lowercase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''}
    )
    lowerCamelCase : Optional[int] = field(
        default=__lowercase,
        metadata={'''help''': '''The number of processes to use for the preprocessing.'''},
    )
    lowerCamelCase : Optional[int] = field(
        default=__lowercase,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        },
    )
    lowerCamelCase : bool = field(
        default=__lowercase,
        metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        },
    )
    lowerCamelCase : Optional[int] = field(
        default=__lowercase,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        },
    )
    lowerCamelCase : Optional[int] = field(
        default=__lowercase,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        },
    )

    def __UpperCAmelCase(self : List[str]) -> str:
        if self.train_file is not None:
            lowerCAmelCase = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            lowerCAmelCase = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class UpperCAmelCase_:
    lowerCamelCase : PreTrainedTokenizerBase
    lowerCamelCase : Union[bool, str, PaddingStrategy] = True
    lowerCamelCase : Optional[int] = None
    lowerCamelCase : Optional[int] = None

    def __call__(self : Optional[Any], UpperCAmelCase__ : Union[str, Any]) -> str:
        lowerCAmelCase = 'label' if 'label' in features[0].keys() else 'labels'
        lowerCAmelCase = [feature.pop(UpperCAmelCase__) for feature in features]
        lowerCAmelCase = len(UpperCAmelCase__)
        lowerCAmelCase = len(features[0]['input_ids'])
        lowerCAmelCase = [
            [{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase__)] for feature in features
        ]
        lowerCAmelCase = list(chain(*UpperCAmelCase__))
        lowerCAmelCase = self.tokenizer.pad(
            UpperCAmelCase__,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='pt',
        )
        # Un-flatten
        lowerCAmelCase = {k: v.view(UpperCAmelCase__, UpperCAmelCase__, -1) for k, v in batch.items()}
        # Add back labels
        lowerCAmelCase = torch.tensor(UpperCAmelCase__, dtype=torch.intaa)
        return batch


def a_():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', lowerCamelCase, lowerCamelCase)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    lowerCAmelCase = training_args.get_process_log_level()
    logger.setLevel(lowerCamelCase)
    datasets.utils.logging.set_verbosity(lowerCamelCase)
    transformers.utils.logging.set_verbosity(lowerCamelCase)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}'''
    )
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Detecting last checkpoint.
    lowerCAmelCase = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCAmelCase = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        lowerCAmelCase = {}
        if data_args.train_file is not None:
            lowerCAmelCase = data_args.train_file
        if data_args.validation_file is not None:
            lowerCAmelCase = data_args.validation_file
        lowerCAmelCase = data_args.train_file.split('.')[-1]
        lowerCAmelCase = load_dataset(
            lowerCamelCase,
            data_files=lowerCamelCase,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        lowerCAmelCase = load_dataset(
            'swag',
            'regular',
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    lowerCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=lowerCamelCase,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    lowerCAmelCase = [f'''ending{i}''' for i in range(4)]
    lowerCAmelCase = 'sent1'
    lowerCAmelCase = 'sent2'

    if data_args.max_seq_length is None:
        lowerCAmelCase = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.'
            )
            lowerCAmelCase = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
                f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
            )
        lowerCAmelCase = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(lowerCamelCase : Optional[Any]):
        lowerCAmelCase = [[context] * 4 for context in examples[context_name]]
        lowerCAmelCase = examples[question_header_name]
        lowerCAmelCase = [
            [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase)
        ]

        # Flatten out
        lowerCAmelCase = list(chain(*lowerCamelCase))
        lowerCAmelCase = list(chain(*lowerCamelCase))

        # Tokenize
        lowerCAmelCase = tokenizer(
            lowerCamelCase,
            lowerCamelCase,
            truncation=lowerCamelCase,
            max_length=lowerCamelCase,
            padding='max_length' if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(lowerCamelCase), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        lowerCAmelCase = raw_datasets['train']
        if data_args.max_train_samples is not None:
            lowerCAmelCase = min(len(lowerCamelCase), data_args.max_train_samples)
            lowerCAmelCase = train_dataset.select(range(lowerCamelCase))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            lowerCAmelCase = train_dataset.map(
                lowerCamelCase,
                batched=lowerCamelCase,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        lowerCAmelCase = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            lowerCAmelCase = min(len(lowerCamelCase), data_args.max_eval_samples)
            lowerCAmelCase = eval_dataset.select(range(lowerCamelCase))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            lowerCAmelCase = eval_dataset.map(
                lowerCamelCase,
                batched=lowerCamelCase,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    lowerCAmelCase = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase, pad_to_multiple_of=8 if training_args.fpaa else None)
    )

    # Metric
    def compute_metrics(lowerCamelCase : List[Any]):
        lowerCAmelCase, lowerCAmelCase = eval_predictions
        lowerCAmelCase = np.argmax(lowerCamelCase, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.floataa).mean().item()}

    # Initialize our Trainer
    lowerCAmelCase = Trainer(
        model=lowerCamelCase,
        args=lowerCamelCase,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=lowerCamelCase,
        data_collator=lowerCamelCase,
        compute_metrics=lowerCamelCase,
    )

    # Training
    if training_args.do_train:
        lowerCAmelCase = None
        if training_args.resume_from_checkpoint is not None:
            lowerCAmelCase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowerCAmelCase = last_checkpoint
        lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        lowerCAmelCase = train_result.metrics
        lowerCAmelCase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase)
        )
        lowerCAmelCase = min(lowerCamelCase, len(lowerCamelCase))
        trainer.log_metrics('train', lowerCamelCase)
        trainer.save_metrics('train', lowerCamelCase)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        lowerCAmelCase = trainer.evaluate()
        lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase)
        lowerCAmelCase = min(lowerCamelCase, len(lowerCamelCase))
        trainer.log_metrics('eval', lowerCamelCase)
        trainer.save_metrics('eval', lowerCamelCase)

    lowerCAmelCase = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowerCamelCase)
    else:
        trainer.create_model_card(**lowerCamelCase)


def a_(lowerCamelCase : Optional[Any]):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()

code_codestyle: 4
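The flatten/un-flatten pattern in `preprocess_function` and the multiple-choice collator above is the core trick of this row: each example's 4 (context, ending) pairs are tokenized as one flat batch and then regrouped in blocks of 4. A standalone sketch of the regrouping (the data is made up):

```python
from itertools import chain

# Two examples, four endings each -> flatten to 8 sequences for the tokenizer.
firsts = [["ctx A"] * 4, ["ctx B"] * 4]
seconds = [[f"A end {i}" for i in range(4)], [f"B end {i}" for i in range(4)]]
flat_firsts = list(chain(*firsts))    # 8 contexts
flat_seconds = list(chain(*seconds))  # 8 endings

# After tokenizing the 8 pairs, regroup every 4 consecutive rows per example.
fake_token_rows = [[i] for i in range(8)]  # stand-in for tokenizer output
regrouped = [fake_token_rows[i : i + 4] for i in range(0, len(fake_token_rows), 4)]
print(regrouped)  # [[[0], [1], [2], [3]], [[4], [5], [6], [7]]]
```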
style_context:

from functools import reduce


UpperCAmelCase__ = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def A(_UpperCAmelCase : str = N) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda _UpperCAmelCase, _UpperCAmelCase: str(int(_UpperCAmelCase) * int(_UpperCAmelCase)), n[i : i + 13]))
        for i in range(len(_UpperCAmelCase) - 12)
    )


if __name__ == "__main__":
    print(f"""{solution() = }""")

style_context_codestyle: 339
0
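An equivalent, arguably clearer way to express the same sliding-window search, assuming Python 3.8+ for math.prod (the small test string and expected value are illustrative):

import math

def largest_window_product(digits: str, window: int = 13) -> int:
    # Take the product of every run of `window` consecutive digits and keep the maximum.
    return max(
        math.prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )

assert largest_window_product("3675356291", window=4) == 630  # 3 * 6 * 7 * 5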
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ : Optional[int] = { '''configuration_chinese_clip''': [ '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ChineseCLIPConfig''', '''ChineseCLIPOnnxConfig''', '''ChineseCLIPTextConfig''', '''ChineseCLIPVisionConfig''', ], '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Union[str, Any] = ['''ChineseCLIPFeatureExtractor'''] lowerCAmelCase_ : List[Any] = ['''ChineseCLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[Any] = [ '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ChineseCLIPModel''', '''ChineseCLIPPreTrainedModel''', '''ChineseCLIPTextModel''', '''ChineseCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys lowerCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
170
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # Algebraically identical to np.tanh: tanh(x) = 2 / (1 + e^(-2x)) - 1.
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
170
1
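The expression above is the standard algebraic rewriting of the hyperbolic tangent, tanh(x) = (e^x - e^-x) / (e^x + e^-x) = 2 / (1 + e^(-2x)) - 1, so it can be sanity-checked directly against np.tanh:

import numpy as np

x = np.linspace(-3.0, 3.0, 7)
manual = (2 / (1 + np.exp(-2 * x))) - 1
assert np.allclose(manual, np.tanh(x))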
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise a PyTorch model from the JSON config.
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint.
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save the PyTorch model.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
244
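A sketch of invoking the conversion function above directly from Python rather than via argparse; all three paths are hypothetical placeholders:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./mobilebert/mobilebert_variables.ckpt",  # hypothetical path
    mobilebert_config_file="./mobilebert/config.json",            # hypothetical path
    pytorch_dump_path="./mobilebert/pytorch_model.bin",           # hypothetical path
)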
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights __A : Union[str, Any] = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase , cache_dir=_UpperCAmelCase) __A : Optional[Any] = [t[-1] for t in os.walk(os.path.join(_UpperCAmelCase , os.listdir(_UpperCAmelCase)[0] , 'snapshots'))] __A : int = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin') for f in files) @slow @require_flax class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase) __A : Dict = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) __A : Optional[Any] = jax.random.PRNGKey(0) __A : int = 4 __A : Tuple = jax.device_count() __A : Union[str, Any] = num_samples * [prompt] __A : Tuple = pipeline.prepare_inputs(_UpperCAmelCase) # shard inputs and rng __A : str = replicate(_UpperCAmelCase) __A : Tuple = jax.random.split(_UpperCAmelCase , _UpperCAmelCase) __A : Union[str, Any] = shard(_UpperCAmelCase) __A : Union[str, Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1514745) < 1e-3 assert np.abs(np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 49947.875) < 5e-1 __A : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) assert len(_UpperCAmelCase) == num_samples def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : str = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_UpperCAmelCase) __A : List[Any] = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) __A : Tuple = jax.random.PRNGKey(0) __A : Any = 50 __A : str = jax.device_count() __A : Union[str, Any] = num_samples * [prompt] __A : List[str] = pipeline.prepare_inputs(_UpperCAmelCase) # shard inputs and rng __A : Dict = replicate(_UpperCAmelCase) __A : Optional[Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase) __A : int = shard(_UpperCAmelCase) __A : Tuple = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert 
np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05652401)) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2383808.2)) < 5e-1 def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[str] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase) __A : List[Any] = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) __A : str = jax.random.PRNGKey(0) __A : Any = 50 __A : Optional[int] = jax.device_count() __A : int = num_samples * [prompt] __A : Optional[int] = pipeline.prepare_inputs(_UpperCAmelCase) # shard inputs and rng __A : Optional[int] = replicate(_UpperCAmelCase) __A : List[str] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase) __A : Dict = shard(_UpperCAmelCase) __A : str = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04003906)) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2373516.75)) < 5e-1 def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa) __A : Union[str, Any] = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) __A : Any = jax.random.PRNGKey(0) __A : List[str] = 50 __A : Optional[int] = jax.device_count() __A : List[Any] = num_samples * [prompt] __A : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase) # shard inputs and rng __A : Union[str, Any] = replicate(_UpperCAmelCase) __A : Optional[Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase) __A : List[str] = shard(_UpperCAmelCase) __A : int = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04003906)) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2373516.75)) < 5e-1 def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , ) __A ,__A : Any = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , ) __A : Optional[Any] = scheduler.create_state() __A : Any = scheduler_state __A : List[str] = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) __A : Union[str, Any] = jax.random.PRNGKey(0) __A : Optional[int] = 50 __A : Optional[Any] = jax.device_count() __A : Any = num_samples * [prompt] __A : Optional[Any] = pipeline.prepare_inputs(_UpperCAmelCase) # shard inputs and rng __A : int = replicate(_UpperCAmelCase) __A : Any = jax.random.split(_UpperCAmelCase , _UpperCAmelCase) __A : Tuple = shard(_UpperCAmelCase) __A : Union[str, Any] = 
pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.045043945)) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2347693.5)) < 5e-1 def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) __A : int = jax.device_count() __A : List[Any] = num_samples * [prompt] __A : List[Any] = jax.random.split(jax.random.PRNGKey(0) , _UpperCAmelCase) __A ,__A : Tuple = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , ) __A : str = replicate(_UpperCAmelCase) __A : str = pipeline.prepare_inputs(_UpperCAmelCase) __A : str = shard(_UpperCAmelCase) __A : int = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images assert images.shape == (num_samples, 1, 512, 512, 3) __A : Any = images[2, 0, 256, 10:17, 1] # With memory efficient attention __A ,__A : str = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , use_memory_efficient_attention=_UpperCAmelCase , ) __A : Any = replicate(_UpperCAmelCase) __A : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase) __A : Optional[Any] = shard(_UpperCAmelCase) __A : List[Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) __A : List[Any] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice).max() < 1e-2
190
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
354
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase : def __init__( self :Optional[Any] , lowercase_ :int , lowercase_ :Union[str, Any]=13 , lowercase_ :Union[str, Any]=10 , lowercase_ :Any=3 , lowercase_ :Tuple=2 , lowercase_ :List[Any]=2 , lowercase_ :int=True , lowercase_ :int=True , lowercase_ :List[str]=32 , lowercase_ :Dict=5 , lowercase_ :List[Any]=4 , lowercase_ :List[Any]=37 , lowercase_ :List[Any]="gelu" , lowercase_ :int=0.1 , lowercase_ :List[Any]=0.1 , lowercase_ :List[Any]=10 , lowercase_ :int=0.0_2 , lowercase_ :Union[str, Any]="divided_space_time" , lowercase_ :Tuple=None , )-> Tuple: A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = patch_size A__ = num_frames A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = attention_type A__ = initializer_range A__ = scope A__ = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token A__ = (image_size // patch_size) ** 2 A__ = (num_frames) * self.num_patches_per_frame + 1 def UpperCAmelCase_ ( self :str )-> str: A__ = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self :int )-> Any: A__ = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) A__ = self.num_labels return config def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :List[str] , lowercase_ :List[Any] , lowercase_ :Tuple )-> Optional[int]: A__ = TimesformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self :List[str] , lowercase_ :Tuple , lowercase_ :Tuple , lowercase_ :Dict )-> Tuple: A__ = TimesformerForVideoClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = 
model(lowercase_ ) # verify the logits shape A__ = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowercase_ ) def UpperCAmelCase_ ( self :Optional[Any] )-> str: A__ = self.prepare_config_and_inputs() A__, A__, A__ = config_and_inputs A__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): __lowercase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () __lowercase = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) __lowercase = False __lowercase = False __lowercase = False __lowercase = False def UpperCAmelCase_ ( self :Union[str, Any] )-> Optional[int]: A__ = TimesformerModelTester(self ) A__ = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :int , lowercase_ :Dict , lowercase_ :int=False )-> str: A__ = copy.deepcopy(lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def UpperCAmelCase_ ( self :Union[str, Any] )-> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def UpperCAmelCase_ ( self :List[Any] )-> Tuple: pass def UpperCAmelCase_ ( self :Dict )-> Optional[Any]: A__, A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def UpperCAmelCase_ ( self :Union[str, Any] )-> Dict: A__, A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def UpperCAmelCase_ ( self :Optional[Any] )-> Optional[int]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCAmelCase_ ( self :Dict )-> Optional[int]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_ ) @slow def UpperCAmelCase_ ( self :Any )-> List[Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TimesformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def UpperCAmelCase_ ( self :List[str] )-> str: if not self.has_attentions: pass else: A__, A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: A__ = self.model_tester.seq_length A__ = self.model_tester.num_frames A__ = True A__ = False A__ = True A__ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) A__ = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(lowercase_ ) 
model.to(lowercase_ ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) A__ = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) A__ = len(lowercase_ ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) A__ = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def UpperCAmelCase_ ( self :List[Any] )-> List[str]: def check_hidden_states_output(lowercase_ :Dict , lowercase_ :int , lowercase_ :List[Any] ): A__ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) A__ = outputs.hidden_states A__ = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowercase_ ) , lowercase_ ) A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) A__, A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def UpperCamelCase ( ): A__ = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) A__ = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self :Optional[Any] )-> int: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self :int )-> Any: A__ = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( lowercase_ ) A__ = self.default_image_processor A__ = prepare_video() A__ = image_processor(video[:8] , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): A__ = model(**lowercase_ ) # verify the logits A__ = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , lowercase_ ) A__ = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
123
0
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowercase_ = logging.get_logger(__name__) lowercase_ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) lowercase_ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowercase_ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowercase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), 
("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) lowercase_ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) lowercase_ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) lowercase_ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) lowercase_ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) lowercase_ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) lowercase_ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) lowercase_ = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", 
"""FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) lowercase_ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) lowercase_ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) lowercase_ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowercase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_MAPPING lowercase_ = auto_class_update(FlaxAutoModel) class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowercase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowercase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowercase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowercase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowercase_ = 
auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowercase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowercase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowercase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowercase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowercase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class a_ ( _BaseAutoModelClass ): '''simple docstring''' UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowercase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
58
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) ->Union[str, Any]: for attribute in key.split(""".""" ): _SCREAMING_SNAKE_CASE = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: _SCREAMING_SNAKE_CASE = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: _SCREAMING_SNAKE_CASE = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": _SCREAMING_SNAKE_CASE = value elif weight_type == "weight_g": _SCREAMING_SNAKE_CASE = value elif weight_type == "weight_v": _SCREAMING_SNAKE_CASE = value elif weight_type == "bias": _SCREAMING_SNAKE_CASE = value else: _SCREAMING_SNAKE_CASE = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def lowerCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) ->Any: _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = fairseq_model.state_dict() _SCREAMING_SNAKE_CASE = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _SCREAMING_SNAKE_CASE = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) _SCREAMING_SNAKE_CASE = True else: for key, mapped_key in MAPPING.items(): _SCREAMING_SNAKE_CASE = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned): _SCREAMING_SNAKE_CASE = True if "*" in mapped_key: _SCREAMING_SNAKE_CASE = name.split(__lowerCamelCase )[0].split(""".""" )[-2] _SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , __lowerCamelCase ) if "weight_g" in name: _SCREAMING_SNAKE_CASE = """weight_g""" elif "weight_v" in name: _SCREAMING_SNAKE_CASE = """weight_v""" elif "weight" in name: _SCREAMING_SNAKE_CASE = """weight""" elif "bias" in name: _SCREAMING_SNAKE_CASE = """bias""" else: _SCREAMING_SNAKE_CASE = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(F'Unused weights: {unused_weights}' ) def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ) ->Union[str, Any]: _SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1] _SCREAMING_SNAKE_CASE = name.split(""".""" ) _SCREAMING_SNAKE_CASE = int(items[0] ) _SCREAMING_SNAKE_CASE = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _SCREAMING_SNAKE_CASE = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _SCREAMING_SNAKE_CASE = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) _SCREAMING_SNAKE_CASE = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) _SCREAMING_SNAKE_CASE = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=True ) ->Optional[int]: if config_path is not None: _SCREAMING_SNAKE_CASE = HubertConfig.from_pretrained(__lowerCamelCase ) else: _SCREAMING_SNAKE_CASE = HubertConfig() if is_finetuned: if dict_path: _SCREAMING_SNAKE_CASE = Dictionary.load(__lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _SCREAMING_SNAKE_CASE = target_dict.pad_index _SCREAMING_SNAKE_CASE = target_dict.bos_index _SCREAMING_SNAKE_CASE = target_dict.eos_index _SCREAMING_SNAKE_CASE = len(target_dict.symbols ) _SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , """vocab.json""" ) if not os.path.isdir(__lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) ) return os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer( __lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , ) _SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False _SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) _SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = HubertForCTC(__lowerCamelCase ) else: _SCREAMING_SNAKE_CASE = HubertModel(__lowerCamelCase ) if is_finetuned: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _SCREAMING_SNAKE_CASE = model[0].eval() recursively_load_weights(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) hf_wavavec.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) lowercase_ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
58
1
import math


def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
369
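The brute-force sums above can be replaced by the closed forms sum(1..n) = n(n+1)/2 and sum(i^2, 1..n) = n(n+1)(2n+1)/6, giving a constant-time variant:

def solution_closed_form(n: int = 100) -> int:
    # Closed forms avoid the O(n) loops entirely.
    sum_of_naturals = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_naturals**2 - sum_of_squares

assert solution_closed_form(10) == 2640  # 55**2 - 385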
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings _snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(a) class UpperCAmelCase_ ( a): lowerCamelCase__ = 'rag' lowerCamelCase__ = True def __init__( self, __a=None, __a=True, __a=None, __a=None, __a=None, __a=None, __a=None, __a=" / ", __a=" // ", __a=5, __a=300, __a=768, __a=8, __a="wiki_dpr", __a="train", __a="compressed", __a=None, __a=None, __a=False, __a=False, __a=0.0, __a=True, __a=False, __a=False, __a=False, __a=True, __a=None, **__a, ): '''simple docstring''' super().__init__( bos_token_id=__a, pad_token_id=__a, eos_token_id=__a, decoder_start_token_id=__a, forced_eos_token_id=__a, is_encoder_decoder=__a, prefix=__a, vocab_size=__a, **__a, ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _lowerCAmelCase : List[str] = kwargs.pop("question_encoder") _lowerCAmelCase : Union[str, Any] = question_encoder_config.pop("model_type") _lowerCAmelCase : int = kwargs.pop("generator") _lowerCAmelCase : Optional[Any] = decoder_config.pop("model_type") from ..auto.configuration_auto import AutoConfig _lowerCAmelCase : int = AutoConfig.for_model(__a, **__a) _lowerCAmelCase : Tuple = AutoConfig.for_model(__a, **__a) _lowerCAmelCase : List[Any] = reduce_loss _lowerCAmelCase : Any = label_smoothing _lowerCAmelCase : Optional[int] = exclude_bos_score _lowerCAmelCase : Optional[Any] = do_marginalize _lowerCAmelCase : Any = title_sep _lowerCAmelCase : Any = doc_sep _lowerCAmelCase : Optional[int] = n_docs _lowerCAmelCase : Optional[Any] = max_combined_length _lowerCAmelCase : List[str] = dataset _lowerCAmelCase : List[str] = dataset_split _lowerCAmelCase : Optional[Any] = index_name _lowerCAmelCase : Dict = retrieval_vector_size _lowerCAmelCase : Union[str, Any] = retrieval_batch_size _lowerCAmelCase : Optional[int] = passages_path _lowerCAmelCase : Dict = index_path _lowerCAmelCase : Tuple = use_dummy_dataset _lowerCAmelCase : Union[str, Any] = output_retrieved _lowerCAmelCase : str = do_deduplication _lowerCAmelCase : Union[str, Any] = use_cache if self.forced_eos_token_id is None: _lowerCAmelCase : Tuple = getattr(self.generator, "forced_eos_token_id", __a) @classmethod def snake_case__ ( cls, __a, __a, **__a): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = copy.deepcopy(self.__dict__) _lowerCAmelCase : Union[str, Any] = self.question_encoder.to_dict() _lowerCAmelCase : Any = self.generator.to_dict() _lowerCAmelCase : Optional[Any] = self.__class__.model_type return output
300
0
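A minimal sketch of composing a RagConfig from sub-configs via the classmethod defined above (named from_question_encoder_generator_configs in the released transformers API); the DPR question encoder and BART generator are illustrative choices mirroring the standard RAG setup, and keyword overrides such as n_docs map to the constructor arguments documented above:

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, index_name="compressed"
)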
from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand lowercase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name def __SCREAMING_SNAKE_CASE ( snake_case_ ): '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(snake_case_ ): return ext raise Exception( f"""Unable to determine file format from file extension {path}. """ f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def __SCREAMING_SNAKE_CASE ( snake_case_ ): '''simple docstring''' _UpperCAmelCase = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) _UpperCAmelCase = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format _UpperCAmelCase = PipelineDataFormat.from_str( format=snake_case_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(snake_case_ , snake_case_ ) class __lowerCAmelCase ( UpperCAmelCase__ ): def __init__( self : Dict , snake_case__ : Pipeline , snake_case__ : PipelineDataFormat ): """simple docstring""" _UpperCAmelCase = nlp _UpperCAmelCase = reader @staticmethod def UpperCamelCase ( snake_case__ : ArgumentParser ): """simple docstring""" _UpperCAmelCase = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=snake_case__ , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=snake_case__ , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=snake_case__ , help="Name or path to the model to instantiate." ) run_parser.add_argument("--config" , type=snake_case__ , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=snake_case__ , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=snake_case__ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=snake_case__ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=snake_case__ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=snake_case__ ) def UpperCamelCase ( self : Optional[int] ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self._nlp, [] for entry in self._reader: _UpperCAmelCase = nlp(**snake_case__ ) if self._reader.is_multi_columns else nlp(snake_case__ ) if isinstance(snake_case__ , snake_case__ ): outputs.append(snake_case__ ) else: outputs += output # Saving data if self._nlp.binary_output: _UpperCAmelCase = self._reader.save_binary(snake_case__ ) logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(snake_case__ )
133
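The run subcommand above is a thin wrapper around pipeline(); a programmatic sketch of the same flow, where the task and example text are illustrative:

from transformers import pipeline

nlp = pipeline(task="sentiment-analysis")  # the CLI resolves --task the same way
print(nlp("This library makes batch inference from the command line straightforward."))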
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility: inside `as_target_processor` everything is
        # forwarded to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # When both modalities are given, attach the tokenized text as labels.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
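A short usage sketch of the processor above, assuming the standard "facebook/wav2vec2-base-960h" checkpoint and a 16 kHz mono waveform (the random array stands in for real audio):

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

waveform = np.random.randn(16_000).astype(np.float32)  # placeholder for one second of audio
inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")

# Passing `text` in the same call attaches the tokenized transcript as CTC labels.
batch = processor(audio=waveform, text="HELLO WORLD", sampling_rate=16_000, return_tensors="pt")
print(batch["input_values"].shape, batch["labels"].shape)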
133
1
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo A_ : List[Any] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" A_ : List[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" A_ : Dict = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
316
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Dict = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
1
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging A : str = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase (UpperCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Any , ) -> int: super().__init__() if safety_checker is None: logger.warning( F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( speech_model=_SCREAMING_SNAKE_CASE , speech_processor=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , ) def __A ( self : Dict , __magic_name__ : Union[str, Any] = "auto" ) -> Optional[Any]: if slice_size == "auto": SCREAMING_SNAKE_CASE_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE ) def __A ( self : List[Any] ) -> Optional[Any]: self.enable_attention_slicing(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[int]=16_000 , __magic_name__ : List[str] = 512 , __magic_name__ : Dict = 512 , __magic_name__ : Dict = 50 , __magic_name__ : str = 7.5 , __magic_name__ : List[str] = None , __magic_name__ : int = 1 , __magic_name__ : int = 0.0 , __magic_name__ : Any = None , __magic_name__ : List[str] = None , __magic_name__ : Union[str, Any] = "pil" , __magic_name__ : List[Any] = True , __magic_name__ : List[Any] = None , __magic_name__ : Union[str, Any] = 1 , **__magic_name__ : Tuple , ) -> Any: SCREAMING_SNAKE_CASE_ = self.speech_processor.feature_extractor( _SCREAMING_SNAKE_CASE , return_tensors="pt" , sampling_rate=_SCREAMING_SNAKE_CASE ).input_features.to(self.device ) SCREAMING_SNAKE_CASE_ = self.speech_model.generate(_SCREAMING_SNAKE_CASE , max_length=480_000 ) SCREAMING_SNAKE_CASE_ = self.speech_processor.tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , normalize=_SCREAMING_SNAKE_CASE )[ 0 ] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = 1 elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = 
len(_SCREAMING_SNAKE_CASE ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(_SCREAMING_SNAKE_CASE )}.''' ) # get prompt text embeddings SCREAMING_SNAKE_CASE_ = self.tokenizer( _SCREAMING_SNAKE_CASE , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) SCREAMING_SNAKE_CASE_ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length] SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method SCREAMING_SNAKE_CASE_ = text_embeddings.shape SCREAMING_SNAKE_CASE_ = text_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 ) SCREAMING_SNAKE_CASE_ = text_embeddings.view(bs_embed * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE_ = 42 if negative_prompt is None: SCREAMING_SNAKE_CASE_ = [""""""] * batch_size elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ): raise TypeError( F'''`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !=''' F''' {type(_SCREAMING_SNAKE_CASE )}.''' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = [negative_prompt] elif batch_size != len(_SCREAMING_SNAKE_CASE ): raise ValueError( F'''`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:''' F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: SCREAMING_SNAKE_CASE_ = negative_prompt SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1] SCREAMING_SNAKE_CASE_ = self.tokenizer( _SCREAMING_SNAKE_CASE , padding="max_length" , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="pt" , ) SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method SCREAMING_SNAKE_CASE_ = uncond_embeddings.shape[1] SCREAMING_SNAKE_CASE_ = uncond_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 ) SCREAMING_SNAKE_CASE_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. SCREAMING_SNAKE_CASE_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) SCREAMING_SNAKE_CASE_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps SCREAMING_SNAKE_CASE_ = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device="cpu" , dtype=_SCREAMING_SNAKE_CASE ).to( self.device ) else: SCREAMING_SNAKE_CASE_ = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) SCREAMING_SNAKE_CASE_ = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE_ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) SCREAMING_SNAKE_CASE_ = {} if accepts_eta: SCREAMING_SNAKE_CASE_ = eta for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # predict the noise residual SCREAMING_SNAKE_CASE_ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE ).sample # perform guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE_ = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = 1 / 0.1_8215 * latents SCREAMING_SNAKE_CASE_ = self.vae.decode(_SCREAMING_SNAKE_CASE ).sample SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) if not 
return_dict: return image return StableDiffusionPipelineOutput(images=_SCREAMING_SNAKE_CASE , nsfw_content_detected=_SCREAMING_SNAKE_CASE )
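A hedged loading sketch for pipelines of this kind: community pipelines are typically pulled in through `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)`, with the speech components supplied explicitly since the Stable Diffusion checkpoint does not bundle them. The pipeline id, Whisper size, and SD checkpoint below are assumptions for illustration.

import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from diffusers import DiffusionPipeline

speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
    speech_model=speech_model,
    speech_processor=speech_processor,
    torch_dtype=torch.float16,
).to("cuda")

# `waveform` would be a 16 kHz mono numpy array; Whisper transcribes it into the prompt.
# images = pipe(waveform, sampling_rate=16_000).images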
118
"""simple docstring""" from math import pi, sqrt, tan def _snake_case ( UpperCamelCase : float ): if side_length < 0: raise ValueError("""surface_area_cube() only accepts non-negative values""" ) return 6 * side_length**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if length < 0 or breadth < 0 or height < 0: raise ValueError("""surface_area_cuboid() only accepts non-negative values""" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def _snake_case ( UpperCamelCase : float ): if radius < 0: raise ValueError("""surface_area_sphere() only accepts non-negative values""" ) return 4 * pi * radius**2 def _snake_case ( UpperCamelCase : float ): if radius < 0: raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" ) return 3 * pi * radius**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if radius < 0 or height < 0: raise ValueError("""surface_area_cone() only accepts non-negative values""" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( """surface_area_conical_frustum() only accepts non-negative values""" ) UpperCAmelCase : Tuple = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if radius < 0 or height < 0: raise ValueError("""surface_area_cylinder() only accepts non-negative values""" ) return 2 * pi * radius * (height + radius) def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if torus_radius < 0 or tube_radius < 0: raise ValueError("""surface_area_torus() only accepts non-negative values""" ) if torus_radius < tube_radius: raise ValueError( """surface_area_torus() does not support spindle or self intersecting tori""" ) return 4 * pow(UpperCamelCase , 2 ) * torus_radius * tube_radius def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if length < 0 or width < 0: raise ValueError("""area_rectangle() only accepts non-negative values""" ) return length * width def _snake_case ( UpperCamelCase : float ): if side_length < 0: raise ValueError("""area_square() only accepts non-negative values""" ) return side_length**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if base < 0 or height < 0: raise ValueError("""area_triangle() only accepts non-negative values""" ) return (base * height) / 2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("""Given three sides do not form a triangle""" ) UpperCAmelCase : Union[str, Any] = (sidea + sidea + sidea) / 2 UpperCAmelCase : Union[str, Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if base < 0 or height < 0: raise ValueError("""area_parallelogram() only accepts non-negative values""" ) return base * height def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if basea < 0 or basea < 0 or height < 0: raise 
ValueError("""area_trapezium() only accepts non-negative values""" ) return 1 / 2 * (basea + basea) * height def _snake_case ( UpperCamelCase : float ): if radius < 0: raise ValueError("""area_circle() only accepts non-negative values""" ) return pi * radius**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if radius_x < 0 or radius_y < 0: raise ValueError("""area_ellipse() only accepts non-negative values""" ) return pi * radius_x * radius_y def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError("""area_rhombus() only accepts non-negative values""" ) return 1 / 2 * diagonal_a * diagonal_a def _snake_case ( UpperCamelCase : int , UpperCamelCase : float ): if not isinstance(UpperCamelCase , UpperCamelCase ) or sides < 3: raise ValueError( """area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides""" ) elif length < 0: raise ValueError( """area_reg_polygon() only accepts non-negative values as \ length of a side""" ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""") print(f"""Square: {area_square(1_0) = }""") print(f"""Triangle: {area_triangle(1_0, 1_0) = }""") print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""") print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""") print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""") print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""") print(f"""Circle: {area_circle(2_0) = }""") print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""") print("\nSurface Areas of various geometric shapes: \n") print(f"""Cube: {surface_area_cube(2_0) = }""") print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""") print(f"""Sphere: {surface_area_sphere(2_0) = }""") print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""") print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""") print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""") print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""") print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""") print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""") print(f"""Square: {area_reg_polygon(4, 1_0) = }""") print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
109
0
import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : def __init__( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Optional[Any]=10 , _UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , _UpperCAmelCase : List[str]=[1, 1, 2, 1] , _UpperCAmelCase : Any=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : int="relu" , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=None , ) -> Dict: '''simple docstring''' _lowerCAmelCase : Optional[Any] = parent _lowerCAmelCase : Any = batch_size _lowerCAmelCase : int = image_size _lowerCAmelCase : Optional[Any] = num_channels _lowerCAmelCase : str = embeddings_size _lowerCAmelCase : List[str] = hidden_sizes _lowerCAmelCase : List[Any] = depths _lowerCAmelCase : Union[str, Any] = is_training _lowerCAmelCase : Dict = use_labels _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : Dict = num_labels _lowerCAmelCase : List[Any] = scope _lowerCAmelCase : Tuple = len(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[Any] = None if self.use_labels: _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> Optional[int]: '''simple docstring''' _lowerCAmelCase : Tuple = RegNetModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase : Tuple = model(_UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> List[Any]: '''simple docstring''' _lowerCAmelCase : Tuple = self.num_labels _lowerCAmelCase : int = RegNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : 
Optional[Any] ) -> Dict: '''simple docstring''' _lowerCAmelCase : int = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs _lowerCAmelCase : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __snake_case (_a , _a , unittest.TestCase ): lowerCAmelCase__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: '''simple docstring''' _lowerCAmelCase : Optional[int] = RegNetModelTester(self ) _lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: '''simple docstring''' return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any ) -> int: '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = model_class(_UpperCAmelCase ) _lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : str = [*signature.parameters.keys()] _lowerCAmelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ) -> Dict: '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Any = model_class(config=_UpperCAmelCase ) for name, module in model.named_modules(): if isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: '''simple docstring''' def check_hidden_states_output(_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ): _lowerCAmelCase : str = model_class(_UpperCAmelCase ) 
model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase : Tuple = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase : Dict = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _lowerCAmelCase , _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Optional[int] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: _lowerCAmelCase : List[Any] = layer_type _lowerCAmelCase : Tuple = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : int = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: '''simple docstring''' for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Tuple = RegNetModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCAmelCase (): '''simple docstring''' _lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __snake_case (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: '''simple docstring''' return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: '''simple docstring''' _lowerCAmelCase : Dict = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCAmelCase ) _lowerCAmelCase : Optional[Any] = self.default_image_processor _lowerCAmelCase : Union[str, Any] = prepare_img() _lowerCAmelCase : int = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase : int = model(**_UpperCAmelCase ) # verify the logits _lowerCAmelCase : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _lowerCAmelCase : List[Any] = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
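For reference, a hedged end-to-end inference sketch mirroring what the integration test above checks; "facebook/regnet-y-040" is the first entry of REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, and the image path is the COCO fixture the test uses.

import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])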
159
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
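A short, hedged usage sketch for the exported classes; "microsoft/conditional-detr-resnet-50" is the reference checkpoint, and the local image path is a hypothetical placeholder.

import torch
from PIL import Image
from transformers import ConditionalDetrForObjectDetection, ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

image = Image.open("street_scene.jpg")  # hypothetical local image
with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))
# Boxes and labels can then be recovered with processor.post_process_object_detection(...).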
159
1
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
50
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Are we running in a Kaggle Kernel or in Google Colab?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e

        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
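Typical notebook usage of the launcher above; `training_function` is a user-defined function that builds its own `Accelerator` internally, and `num_processes=2` assumes two visible GPUs.

from accelerate import notebook_launcher

def training_function():
    from accelerate import Accelerator

    accelerator = Accelerator()
    accelerator.print(f"Running on {accelerator.num_processes} process(es).")

notebook_launcher(training_function, args=(), num_processes=2)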
50
1
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: List[str] , _lowerCamelCase: Union[str, Any]=None , _lowerCamelCase: int=None , _lowerCamelCase: str=None , _lowerCamelCase: Any=None , _lowerCamelCase: int=None , _lowerCamelCase: str=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: __lowerCamelCase : Optional[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __lowerCamelCase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __lowerCamelCase : Tuple = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowerCamelCase : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowerCamelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _snake_case : def __init__( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any]=13 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : int=4 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Any=2 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : str=0 , UpperCAmelCase : Union[str, Any]=0.0_2 , ): __lowerCamelCase : Tuple = parent __lowerCamelCase : List[Any] = batch_size __lowerCamelCase : Dict = seq_length __lowerCamelCase : Dict = is_training __lowerCamelCase : List[Any] = use_labels __lowerCamelCase : Optional[Any] = vocab_size __lowerCamelCase : Optional[Any] = hidden_size __lowerCamelCase : Union[str, Any] = num_hidden_layers __lowerCamelCase : List[str] = num_attention_heads __lowerCamelCase : Dict = intermediate_size __lowerCamelCase : Tuple = hidden_act __lowerCamelCase : Dict = hidden_dropout_prob __lowerCamelCase : Optional[Any] = attention_probs_dropout_prob __lowerCamelCase : Tuple = max_position_embeddings __lowerCamelCase : str = eos_token_id __lowerCamelCase : List[Any] = pad_token_id __lowerCamelCase : int = bos_token_id __lowerCamelCase : List[Any] = initializer_range def lowerCamelCase__ ( self : Optional[Any] ): __lowerCamelCase : Tuple = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , 
self.vocab_size ) __lowerCamelCase : List[str] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __lowerCamelCase : Optional[int] = shift_tokens_right(UpperCAmelCase , 1 , 2 ) __lowerCamelCase : str = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase , ) __lowerCamelCase : int = prepare_blenderbot_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def lowerCamelCase__ ( self : int ): __lowerCamelCase : str = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] ): __lowerCamelCase : int = 20 __lowerCamelCase : Tuple = model_class_name(UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = model.encode(inputs_dict["input_ids"] ) __lowerCamelCase : List[Any] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowerCamelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) __lowerCamelCase : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) __lowerCamelCase : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) __lowerCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowerCamelCase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , ) __lowerCamelCase : Optional[Any] = model.decode(UpperCAmelCase , UpperCAmelCase ) __lowerCamelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCamelCase__ ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] ): __lowerCamelCase : List[Any] = 20 __lowerCamelCase : Any = model_class_name(UpperCAmelCase ) __lowerCamelCase : int = model.encode(inputs_dict["input_ids"] ) __lowerCamelCase : List[str] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowerCamelCase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __lowerCamelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , 
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) __lowerCamelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowerCamelCase : Any = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) __lowerCamelCase : Dict = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase ) __lowerCamelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class _snake_case ( unittest.TestCase ): snake_case__ = 99 def lowerCamelCase__ ( self : Optional[Any] ): __lowerCamelCase : Optional[int] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __lowerCamelCase : List[Any] = input_ids.shape[0] __lowerCamelCase : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCamelCase__ ( self : str ): __lowerCamelCase : Optional[int] = self._get_config_and_data() __lowerCamelCase : List[Any] = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) __lowerCamelCase : List[str] = lm_model(input_ids=UpperCAmelCase ) __lowerCamelCase : Dict = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , UpperCAmelCase ) def lowerCamelCase__ ( self : List[Any] ): __lowerCamelCase : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __lowerCamelCase : List[Any] = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) __lowerCamelCase : Tuple = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __lowerCamelCase : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __lowerCamelCase : Optional[int] = lm_model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase ) __lowerCamelCase : Optional[int] = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , UpperCAmelCase ) def lowerCamelCase__ ( self : List[str] ): __lowerCamelCase : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __lowerCamelCase : Optional[Any] = shift_tokens_right(UpperCAmelCase , 1 , 2 ) __lowerCamelCase : str = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() __lowerCamelCase : Optional[int] = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) 
self.assertEqual(UpperCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _snake_case ( a__ , unittest.TestCase , a__ ): snake_case__ = True snake_case__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) snake_case__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowerCamelCase__ ( self : List[str] ): __lowerCamelCase : Tuple = FlaxBlenderbotModelTester(self ) def lowerCamelCase__ ( self : Dict ): __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowerCamelCase__ ( self : Dict ): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : List[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) __lowerCamelCase : Tuple = model_class(UpperCAmelCase ) @jax.jit def encode_jitted(UpperCAmelCase : Dict , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[int] ): return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase ) with self.subTest("JIT Enabled" ): __lowerCamelCase : Optional[int] = encode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowerCamelCase : Dict = encode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Any ): __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : Dict = model_class(UpperCAmelCase ) __lowerCamelCase : Optional[int] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) __lowerCamelCase : Optional[Any] = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ): return model.decode( decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , ) with self.subTest("JIT Enabled" ): __lowerCamelCase : Optional[Any] = decode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowerCamelCase : Optional[Any] = decode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : str ): for model_class_name in self.all_model_classes: __lowerCamelCase : List[Any] = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __lowerCamelCase : Tuple = np.ones((1, 
1) ) * model.config.eos_token_id __lowerCamelCase : str = model(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." ) @slow def lowerCamelCase__ ( self : str ): __lowerCamelCase : Union[str, Any] = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} __lowerCamelCase : Optional[Any] = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} __lowerCamelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase ) __lowerCamelCase : Dict = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" ) __lowerCamelCase : Union[str, Any] = ["Sam"] __lowerCamelCase : List[str] = tokenizer(UpperCAmelCase , return_tensors="jax" ) __lowerCamelCase : List[str] = model.generate(**UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : List[str] = "Sam is a great name. It means \"sun\" in Gaelic." __lowerCamelCase : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase , **UpperCAmelCase ) assert generated_txt[0].strip() == tgt_text
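A note on the row above: the assertions check that shift_tokens_right puts the decoder start token (id 2) in column 0 and removes exactly one pad token, and np.intaa appears to be a mangled np.int64. Below is a minimal NumPy sketch of the conventional seq2seq shift, not necessarily the library's exact implementation:

import numpy as np

def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    # shift every token one slot to the right and prepend the decoder start token
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # masked label positions (-100), if any, become padding again
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
shifted = shift_tokens_right_sketch(ids, pad_token_id=1, decoder_start_token_id=2)
assert shifted[0, 0] == 2 and shifted.shape == ids.shape
assert (shifted == 1).sum() == (ids == 1).sum() - 1   # one fewer pad token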
364
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str ) -> str | Literal[False]: '''simple docstring''' __lowerCamelCase : Optional[int] = list(_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = list(_lowerCamelCase ) __lowerCamelCase : Tuple = 0 for i in range(len(_lowerCamelCase ) ): if lista[i] != lista[i]: count += 1 __lowerCamelCase : Optional[int] = "_" if count > 1: return False else: return "".join(_lowerCamelCase ) def lowercase_ ( _lowerCamelCase: list[str] ) -> list[str]: '''simple docstring''' __lowerCamelCase : List[Any] = [] while True: __lowerCamelCase : Dict = ["$"] * len(_lowerCamelCase ) __lowerCamelCase : Any = [] for i in range(len(_lowerCamelCase ) ): for j in range(i + 1 , len(_lowerCamelCase ) ): __lowerCamelCase : str = compare_string(binary[i] , binary[j] ) if k is False: __lowerCamelCase : str = "*" __lowerCamelCase : Union[str, Any] = "*" temp.append("X" ) for i in range(len(_lowerCamelCase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_lowerCamelCase ) == 0: return pi __lowerCamelCase : Tuple = list(set(_lowerCamelCase ) ) def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: Sequence[float] ) -> list[str]: '''simple docstring''' __lowerCamelCase : Union[str, Any] = [] for minterm in minterms: __lowerCamelCase : Union[str, Any] = "" for _ in range(_lowerCamelCase ): __lowerCamelCase : Tuple = str(minterm % 2 ) + string minterm //= 2 temp.append(_lowerCamelCase ) return temp def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: int ) -> bool: '''simple docstring''' __lowerCamelCase : Tuple = list(_lowerCamelCase ) __lowerCamelCase : Optional[int] = list(_lowerCamelCase ) __lowerCamelCase : str = 0 for i in range(len(_lowerCamelCase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def lowercase_ ( _lowerCamelCase: list[list[int]] , _lowerCamelCase: list[str] ) -> list[str]: '''simple docstring''' __lowerCamelCase : Optional[int] = [] __lowerCamelCase : str = [0] * len(_lowerCamelCase ) for i in range(len(chart[0] ) ): __lowerCamelCase : List[str] = 0 __lowerCamelCase : Optional[Any] = -1 for j in range(len(_lowerCamelCase ) ): if chart[j][i] == 1: count += 1 __lowerCamelCase : List[Any] = j if count == 1: __lowerCamelCase : Optional[Any] = 1 for i in range(len(_lowerCamelCase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(_lowerCamelCase ) ): __lowerCamelCase : List[Any] = 0 temp.append(prime_implicants[i] ) while True: __lowerCamelCase : str = 0 __lowerCamelCase : Dict = -1 __lowerCamelCase : Tuple = 0 for i in range(len(_lowerCamelCase ) ): __lowerCamelCase : Union[str, Any] = chart[i].count(1 ) if count_n > max_n: __lowerCamelCase : Optional[int] = count_n __lowerCamelCase : List[Any] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_lowerCamelCase ) ): __lowerCamelCase : Any = 0 def lowercase_ ( _lowerCamelCase: list[str] , _lowerCamelCase: list[str] ) -> list[list[int]]: '''simple docstring''' __lowerCamelCase : Dict = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )] for i in range(len(_lowerCamelCase ) ): __lowerCamelCase : List[str] = prime_implicants[i].count("_" ) for j in range(len(_lowerCamelCase ) ): if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ): __lowerCamelCase : Dict 
= 1 return chart def lowercase_ ( ) -> None: '''simple docstring''' __lowerCamelCase : Any = int(input("Enter the no. of variables\n" ) ) __lowerCamelCase : List[str] = [ float(_lowerCamelCase ) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() ] __lowerCamelCase : List[str] = decimal_to_binary(_lowerCamelCase , _lowerCamelCase ) __lowerCamelCase : str = check(_lowerCamelCase ) print("Prime Implicants are:" ) print(_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase ) __lowerCamelCase : Any = selection(_lowerCamelCase , _lowerCamelCase ) print("Essential Prime Implicants are:" ) print(_lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
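One caveat worth noting: main() reads the minterms as floats, so str(minterm % 2) produces tokens like "1.0" rather than single bits. Assuming integer minterms, the same conversion loop used in decimal_to_binary yields clean bitstrings, as this standalone check shows:

def to_bits(minterm: int, no_of_variable: int) -> str:
    bits = ""
    for _ in range(no_of_variable):
        bits = str(minterm % 2) + bits
        minterm //= 2
    return bits

assert to_bits(5, 3) == "101"
assert [to_bits(m, 3) for m in (0, 1, 6, 7)] == ["000", "001", "110", "111"]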
64
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A : Any = logging.get_logger(__name__) A : int = {"vocab_file": "sentencepiece.bpe.model"} A : str = { "vocab_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model" ), }, } A : int = { "moussaKam/mbarthez": 1_0_2_4, "moussaKam/barthez": 1_0_2_4, "moussaKam/barthez-orangesum-title": 1_0_2_4, } A : int = "▁" class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : Any =VOCAB_FILES_NAMES __UpperCAmelCase : int =PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Dict =["""input_ids""", """attention_mask"""] def __init__( self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a = None , **__a , ): # Mask token behave like a normal word, i.e. include the space before it __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) __lowerCAmelCase = vocab_file __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__a ) ) __lowerCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} __lowerCAmelCase = len(self.sp_model ) - 1 __lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def snake_case ( self , __a , __a = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] __lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case ( self , __a , __a = None , __a = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is None: return [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] def snake_case ( self , __a , __a = None ): __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case ( self ): return len(self.sp_model ) def snake_case ( self ): __lowerCAmelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case ( self , __a ): return self.sp_model.encode(__a , out_type=__a ) def snake_case ( self , __a ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowerCAmelCase = self.sp_model.PieceToId(__a ) return spm_id if spm_id else self.unk_token_id def snake_case ( self , __a ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(__a ) def snake_case ( self , 
__a ): __lowerCAmelCase = [] __lowerCAmelCase = "" __lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__a ) + token __lowerCAmelCase = True __lowerCAmelCase = [] else: current_sub_tokens.append(__a ) __lowerCAmelCase = False out_string += self.sp_model.decode(__a ) return out_string.strip() def __getstate__( self ): __lowerCAmelCase = self.__dict__.copy() __lowerCAmelCase = None return state def __setstate__( self , __a ): __lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __lowerCAmelCase = {} __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case ( self , __a , __a = None ): if not os.path.isdir(__a ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return __lowerCAmelCase = os.path.join( __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __a ) elif not os.path.isfile(self.vocab_file ): with open(__a , "wb" ) as fi: __lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__a ) return (out_vocab_file,)
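The special-token layout implemented by build_inputs_with_special_tokens in the row above follows the CamemBERT/RoBERTa convention, <s> A </s></s> B </s>, and create_token_type_ids_from_sequences returns zeros for every position. A plain-list sketch with toy sentence ids (the cls/sep ids match the fairseq_tokens_to_ids mapping above):

cls_id, sep_id = 0, 2      # <s> = 0, </s> = 2 per the fairseq mapping
a = [11, 12]               # toy ids for sentence A
b = [21, 22, 23]           # toy ids for sentence B

single = [cls_id] + a + [sep_id]
pair = [cls_id] + a + [sep_id] + [sep_id] + b + [sep_id]
assert pair == [0, 11, 12, 2, 2, 21, 22, 23, 2]

token_type_ids = [0] * len(pair)   # all zeros, even for the pair case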
57
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
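The "server down" test in the row above works by patching the lowest-level HTTP entry point so every request appears to return a 500 while cached files keep working. The pattern in isolation, as a sketch independent of transformers:

import unittest.mock as mock

import requests

response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = requests.exceptions.HTTPError

with mock.patch("requests.Session.request", return_value=response_mock) as mock_request:
    resp = requests.Session().request("HEAD", "https://example.com")
    assert resp.status_code == 500
    mock_request.assert_called()   # the code under test really hit the network layer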
265
0
"""simple docstring""" class __A : '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : Union[str, Any] = val lowercase__ : List[str] = None lowercase__ : Dict = None def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Tuple: """simple docstring""" if self.val: if val < self.val: if self.left is None: lowercase__ : Optional[int] = Node(_snake_case ) else: self.left.insert(_snake_case ) elif val > self.val: if self.right is None: lowercase__ : str = Node(_snake_case ) else: self.right.insert(_snake_case ) else: lowercase__ : Optional[Any] = val def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]: # Recursive traversal if root: inorder(root.left , __lowerCamelCase ) res.append(root.val ) inorder(root.right , __lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase ) -> str: # Build BST if len(__lowerCamelCase ) == 0: return arr lowercase__ : str = Node(arr[0] ) for i in range(1 , len(__lowerCamelCase ) ): root.insert(arr[i] ) # Traverse BST in order. lowercase__ : Any = [] inorder(__lowerCamelCase , __lowerCamelCase ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
302
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowerCAmelCase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowerCAmelCase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowerCAmelCase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] ,) def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ,_snake_case : Any ,_snake_case : List[str]=None ,_snake_case : Tuple=None ,_snake_case : List[Any]=None ,_snake_case : Any=None ,_snake_case : Optional[int]="auto" ,_snake_case : Optional[int]=-1 ,_snake_case : Optional[int]=0.9 ,_snake_case : Any=5 ,_snake_case : Dict=500 ,_snake_case : Optional[int]="gpt2-large" ,_snake_case : Optional[Any]=-1 ,_snake_case : Tuple=1_024 ,_snake_case : Optional[int]=25 ,_snake_case : Dict=5 ,_snake_case : int=True ,_snake_case : Union[str, Any]=25 ,) -> Any: """simple docstring""" lowercase__ : Any = compute_mauve( p_text=_snake_case ,q_text=_snake_case ,p_features=_snake_case ,q_features=_snake_case ,p_tokens=_snake_case ,q_tokens=_snake_case ,num_buckets=_snake_case ,pca_max_data=_snake_case ,kmeans_explained_var=_snake_case ,kmeans_num_redo=_snake_case ,kmeans_max_iter=_snake_case ,featurize_model_name=_snake_case ,device_id=_snake_case ,max_text_length=_snake_case ,divergence_curve_discretization_size=_snake_case ,mauve_scaling_factor=_snake_case ,verbose=_snake_case ,seed=_snake_case ,) return out
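Illustrative only: MAUVE clusters model features and quantizes them into the p_hist/q_hist histograms named in the docstring, then builds the divergence frontier from KL terms between mixtures of those histograms. A toy KL computation of the kind involved (this is not the library's API):

import numpy as np

def kl_divergence(p: np.ndarray, q: np.ndarray, eps: float = 1e-12) -> float:
    p = p / p.sum()
    q = q / q.sum()
    return float(np.sum(p * np.log((p + eps) / (q + eps))))

p_hist = np.array([0.5, 0.3, 0.2])
q_hist = np.array([0.4, 0.4, 0.2])
print(kl_divergence(p_hist, q_hist))  # near zero: the histograms are close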
302
1
"""simple docstring""" def _snake_case ( lowerCamelCase__ : int ) -> Optional[Any]: if not isinstance(snake_case__ , snake_case__ ): lowerCamelCase_ : Tuple =F"""Input value of [number={number}] must be an integer""" raise TypeError(snake_case__ ) if number < 0: return False lowerCamelCase_ : Optional[Any] =number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
144
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser A_ = re.compile(r'''\s+''') def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" return {"hash": hashlib.mda(re.sub(snake_case__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def UpperCAmelCase__ (snake_case__ : Dict ): """simple docstring""" _snake_case : Any = [len(snake_case__ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(snake_case__ ), "line_max": max(snake_case__ )} def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[Any] ): """simple docstring""" if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : List[str]=5 ): """simple docstring""" _snake_case : Any = ["""auto-generated""", """autogenerated""", """automatically generated"""] _snake_case : Tuple = example["""content"""].splitlines() for _, line in zip(range(snake_case__ ) , snake_case__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Union[str, Any]=5 , snake_case__ : Any=0.05 ): """simple docstring""" _snake_case : Optional[Any] = ["""unit tests""", """test file""", """configuration file"""] _snake_case : List[Any] = example["""content"""].splitlines() _snake_case : Dict = 0 _snake_case : str = 0 # first test for _, line in zip(range(snake_case__ ) , snake_case__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test _snake_case : Optional[int] = example["""content"""].count("""\n""" ) _snake_case : Tuple = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Optional[int] = ["""def """, """class """, """for """, """while """] _snake_case : str = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : List[str]=4 ): """simple docstring""" _snake_case : List[Any] = example["""content"""].splitlines() _snake_case : str = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def UpperCAmelCase__ (snake_case__ : List[str] ): """simple docstring""" _snake_case : Optional[Any] = tokenizer(example["""content"""] , truncation=snake_case__ )["""input_ids"""] _snake_case : Optional[Any] = len(example["""content"""] ) / len(snake_case__ ) return {"ratio": ratio} def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" _snake_case : 
Optional[int] = {} results.update(get_hash(snake_case__ ) ) results.update(line_stats(snake_case__ ) ) results.update(alpha_stats(snake_case__ ) ) results.update(char_token_ratio(snake_case__ ) ) results.update(is_autogenerated(snake_case__ ) ) results.update(is_config_or_test(snake_case__ ) ) results.update(has_no_keywords(snake_case__ ) ) results.update(has_few_assignments(snake_case__ ) ) return results def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] ): """simple docstring""" if not check_uniques(snake_case__ , snake_case__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def UpperCAmelCase__ (snake_case__ : Optional[Any] ): """simple docstring""" with open(snake_case__ , """rb""" ) as f_in: with gzip.open(str(snake_case__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(snake_case__ , snake_case__ ) os.unlink(snake_case__ ) # Settings A_ = HfArgumentParser(PreprocessingArguments) A_ = parser.parse_args() if args.num_workers is None: A_ = multiprocessing.cpu_count() A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset A_ = time.time() A_ = load_dataset(args.dataset_name, split='''train''') print(F'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing A_ = time.time() A_ = ds.map(preprocess, num_proc=args.num_workers) print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes A_ = set(ds.unique('''hash''')) A_ = len(uniques) / len(ds) print(F'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics A_ = time.time() A_ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F'''Time to filter dataset: {time.time()-t_start:.2f}''') print(F'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: A_ = time.time() A_ , A_ = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(F'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file A_ = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) A_ = output_dir / '''data''' data_dir.mkdir(exist_ok=True) A_ = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): A_ = str(data_dir / F'''file-{file_number+1:012}.json''') A_ = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
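The exact-deduplication step in the row above boils down to: hash whitespace-normalized content, collect the set of unique hashes, then keep only the first occurrence of each hash (hashlib.mda in the dump appears to be a mangled hashlib.md5). A self-contained sketch of that logic:

import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # same idea as get_hash above: hash the whitespace-stripped content
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

samples = ["def f():\n    return 1", "def f():   return 1", "def g(): return 2"]
uniques = {content_hash(s) for s in samples}

kept = []
for sample in samples:
    h = content_hash(sample)
    if h in uniques:        # first occurrence: keep it and retire the hash
        uniques.remove(h)
        kept.append(sample)

assert len(kept) == 2       # the whitespace variant of f() was dropped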
64
0
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class _snake_case : def __init__( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : Any=16 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : int=True , UpperCAmelCase : str=False , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=32 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : List[str]=30 , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Any=2 , UpperCAmelCase : Optional[int]=None , ): __lowerCamelCase : List[Any] = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : int = decoder_seq_length # For common tests __lowerCamelCase : str = self.decoder_seq_length __lowerCamelCase : Optional[Any] = is_training __lowerCamelCase : Optional[Any] = use_attention_mask __lowerCamelCase : List[str] = use_labels __lowerCamelCase : Union[str, Any] = vocab_size __lowerCamelCase : str = d_model __lowerCamelCase : Tuple = d_model __lowerCamelCase : Optional[int] = decoder_layers __lowerCamelCase : Union[str, Any] = decoder_layers __lowerCamelCase : Optional[int] = decoder_ffn_dim __lowerCamelCase : Dict = decoder_attention_heads __lowerCamelCase : Dict = decoder_attention_heads __lowerCamelCase : Tuple = eos_token_id __lowerCamelCase : str = bos_token_id __lowerCamelCase : Optional[Any] = pad_token_id __lowerCamelCase : List[Any] = decoder_start_token_id __lowerCamelCase : Tuple = use_cache __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : str = None __lowerCamelCase : List[Any] = decoder_seq_length __lowerCamelCase : Union[str, Any] = 2 __lowerCamelCase : int = 1 def lowerCamelCase__ ( self : Any ): __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __lowerCamelCase : Tuple = None if self.use_attention_mask: __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __lowerCamelCase : Optional[int] = None if self.use_labels: __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __lowerCamelCase : Optional[int] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCamelCase__ ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : List[str] , ): __lowerCamelCase : Any = True __lowerCamelCase : Optional[Any] = 
TrOCRDecoder(config=UpperCAmelCase ).to(UpperCAmelCase ).eval() __lowerCamelCase : Any = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __lowerCamelCase : Optional[Any] = model(UpperCAmelCase , use_cache=UpperCAmelCase ) __lowerCamelCase : Dict = model(UpperCAmelCase ) __lowerCamelCase : List[str] = model(UpperCAmelCase , use_cache=UpperCAmelCase ) self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) ) self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 ) __lowerCamelCase : Optional[int] = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids __lowerCamelCase : Any = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and __lowerCamelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase : Tuple = model(UpperCAmelCase )["last_hidden_state"] __lowerCamelCase : Dict = model(UpperCAmelCase , past_key_values=UpperCAmelCase )["last_hidden_state"] # select random slice __lowerCamelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase : Tuple = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() __lowerCamelCase : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) def lowerCamelCase__ ( self : List[str] ): __lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() __lowerCamelCase : int = config_and_inputs __lowerCamelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class _snake_case ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () snake_case__ = (TrOCRForCausalLM,) if is_torch_available() else () snake_case__ = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} snake_case__ = True snake_case__ = False def lowerCamelCase__ ( self : List[Any] ): __lowerCamelCase : Tuple = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCAmelCase ) __lowerCamelCase : List[str] = ConfigTester(self , config_class=UpperCAmelCase ) def lowerCamelCase__ ( self : Any ): pass def lowerCamelCase__ ( self : List[str] ): pass def lowerCamelCase__ ( self : Tuple ): pass def lowerCamelCase__ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Dict ): __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*UpperCAmelCase ) def lowerCamelCase__ ( self : List[str] ): return @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :) def lowerCamelCase__ ( self : List[str] ): pass
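The past-key-values test above asserts a general property: decoding one token at a time with a cache must reproduce the full forward pass. The same invariant, demonstrated on a GRU where the carried hidden state plays the role of the cache (an analogy, not the transformer mechanism itself):

import torch
import torch.nn as nn

torch.manual_seed(0)
gru = nn.GRU(input_size=8, hidden_size=8, batch_first=True).eval()
x = torch.randn(2, 5, 8)

# full forward pass over the whole sequence
full_out, _ = gru(x)

# incremental decoding: one step at a time, carrying the hidden state forward
h = None
steps = []
for t in range(x.shape[1]):
    out, h = gru(x[:, t : t + 1], h)
    steps.append(out)
incremental_out = torch.cat(steps, dim=1)

# both decoding paths must agree, like output_from_no_past vs output_from_past above
assert torch.allclose(full_out, incremental_out, atol=1e-5)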
352
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _snake_case ( unittest.TestCase ): @property def lowerCamelCase__ ( self : Dict ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase__ ( self : str ): __lowerCamelCase : List[Any] = ort.SessionOptions() __lowerCamelCase : List[Any] = False return options def lowerCamelCase__ ( self : Any ): __lowerCamelCase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) __lowerCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) __lowerCamelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default __lowerCamelCase : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) __lowerCamelCase : Dict = "A red cat sitting on a park bench" __lowerCamelCase : List[str] = np.random.RandomState(0 ) __lowerCamelCase : str = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=UpperCAmelCase , output_type="np" , ) __lowerCamelCase : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
64
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: UpperCAmelCase_ = None UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase_ = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } UpperCAmelCase_ = { 'albert-base-v1': 5_1_2, 'albert-large-v1': 5_1_2, 'albert-xlarge-v1': 5_1_2, 'albert-xxlarge-v1': 5_1_2, 'albert-base-v2': 5_1_2, 'albert-large-v2': 5_1_2, 'albert-xlarge-v2': 5_1_2, 'albert-xxlarge-v2': 5_1_2, } UpperCAmelCase_ = '▁' class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Any = VOCAB_FILES_NAMES lowerCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ : int = AlbertTokenizer def __init__( self : Tuple , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=False , _UpperCAmelCase : str="[CLS]" , _UpperCAmelCase : Optional[Any]="[SEP]" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : List[str]="[SEP]" , _UpperCAmelCase : Optional[Any]="<pad>" , _UpperCAmelCase : List[Any]="[CLS]" , _UpperCAmelCase : Tuple="[MASK]" , **_UpperCAmelCase : Union[str, Any] , ): """simple docstring""" UpperCAmelCase__ = ( AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token ) super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , 
pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , ) UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) return (out_vocab_file,)
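Unlike the BARThez layout earlier in this dump, ALBERT's pair layout is [CLS] A [SEP] B [SEP], with segment id 0 for "[CLS] A [SEP]" and 1 for "B [SEP]", exactly as create_token_type_ids_from_sequences computes above. With toy ids:

cls_id, sep_id = 2, 3            # toy values, not ALBERT's real vocabulary
a, b = [11, 12], [21, 22, 23]

pair = [cls_id] + a + [sep_id] + b + [sep_id]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert len(pair) == len(token_type_ids)
assert token_type_ids == [0, 0, 0, 0, 1, 1, 1, 1]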
346
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
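As a companion to the wrapper above, here is a deliberately simplified single-sentence BLEU: clipped n-gram precisions, their geometric mean, and the brevity penalty exp(1 - r/c) for short candidates. The real metric is corpus-level and delegates to the compute_bleu script; this sketch only illustrates the arithmetic.

import math
from collections import Counter

def ngrams(tokens, n):
    return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

def sentence_bleu(prediction, reference, max_order=2):
    # clipped n-gram precisions
    precisions = []
    for n in range(1, max_order + 1):
        pred, ref = ngrams(prediction, n), ngrams(reference, n)
        overlap = sum(min(c, ref[g]) for g, c in pred.items())
        total = max(sum(pred.values()), 1)
        precisions.append(overlap / total)
    if min(precisions) == 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / max_order)
    # brevity penalty: penalize candidates shorter than the reference
    c, r = len(prediction), len(reference)
    bp = 1.0 if c > r else math.exp(1 - r / c)
    return bp * geo_mean

assert sentence_bleu(["hello", "there", "general", "kenobi"],
                     ["hello", "there", "general", "kenobi"]) == 1.0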
346
1
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
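A worked example against the function above: with L = 10 mH and C = 100 nF, f0 = 1 / (2 * pi * sqrt(L * C)) is roughly 5.03 kHz.

inductance = 10e-3      # 10 mH
capacitance = 100e-9    # 100 nF
label, f0 = resonant_frequency(inductance, capacitance)
print(label, f"{f0:.1f} Hz")   # Resonant frequency 5032.9 Hz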
368
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ ( enum.Enum ): """simple docstring""" UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Optional[Any] = 2 @add_end_docstrings(__lowercase ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self , *_a , **_a ) -> List[str]: super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _a : Dict = None if self.model.config.prefix is not None: _a : List[Any] = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params ) _a : Optional[Any] = {**self._preprocess_params, **preprocess_params} _a : List[Any] = {**self._forward_params, **forward_params} def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]: _a : List[Any] = {} if prefix is not None: _a : Optional[Any] = prefix if prefix: _a : Dict = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Tuple = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _a : Dict = handle_long_generation preprocess_params.update(_a ) _a : Tuple = generate_kwargs _a : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _a : Any = ReturnType.TENSORS if return_type is not None: _a : Any = return_type if clean_up_tokenization_spaces is not None: _a : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: _a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _a : List[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowercase ( self , *_a , **_a ) -> Union[str, Any]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self , _a , **_a ) -> List[str]: return super().__call__(_a , **_a ) def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]: _a : Optional[int] = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Union[str, Any] = prompt_text if handle_long_generation == "hole": _a : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _a : int = generate_kwargs['''max_new_tokens'''] else: _a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a : List[str] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _a : List[Any] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _a : List[str] = inputs['''attention_mask'''][:, -keep_length:] return inputs def __lowercase ( self , _a , **_a ) -> Optional[int]: _a : Any = model_inputs['''input_ids'''] _a : Optional[Any] = model_inputs.get('''attention_mask''' , _a ) # Allow empty prompts if input_ids.shape[1] == 0: _a : int = None _a : int = None _a : List[str] = 1 else: _a : List[Any] = input_ids.shape[0] _a : Union[str, Any] = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_a : int = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _a : Tuple = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a : Dict = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) _a : int = generated_sequence.shape[0] if self.framework == "pt": _a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int: _a : Tuple = model_outputs['''generated_sequence'''][0] _a : int = model_outputs['''input_ids'''] _a : Any = model_outputs['''prompt_text'''] _a : Any = generated_sequence.numpy().tolist() _a : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a : Optional[int] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a : str = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a : Union[str, Any] = 0 else: _a : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: _a : str = prompt_text + text[prompt_length:] else: _a : List[str] = text[prompt_length:] _a : Union[str, Any] = {'''generated_text''': all_text} records.append(_a ) return records
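For reference, a minimal usage sketch of the pipeline above; the model name and prompt are illustrative, not taken from the source. The `return_full_text` flag selects between the ReturnType.FULL_TEXT and ReturnType.NEW_TEXT branches of the postprocess step.

from transformers import pipeline

# Illustrative checkpoint and prompt; any causal LM works here.
generator = pipeline("text-generation", model="gpt2")

# ReturnType.FULL_TEXT (the default): prompt + continuation
full = generator("Hello, my name is", max_new_tokens=10, return_full_text=True)

# ReturnType.NEW_TEXT: only the newly generated tokens
new_only = generator("Hello, my name is", max_new_tokens=10, return_full_text=False)

print(full[0]["generated_text"])
print(new_only[0]["generated_text"])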
15
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case_ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = fname.split(os.path.sep )[-1] return re.search(R'^(.*)_\d+\.jpg$' , lowercase_ ).groups()[0] class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :List[str] , lowercase_ :Dict , lowercase_ :List[str]=None , lowercase_ :Optional[Any]=None ) -> Optional[int]: UpperCAmelCase = file_names UpperCAmelCase = image_transform UpperCAmelCase = label_to_id def __len__( self :Optional[int] ) -> Optional[Any]: return len(self.file_names ) def __getitem__( self :int , lowercase_ :str ) -> List[str]: UpperCAmelCase = self.file_names[idx] UpperCAmelCase = PIL.Image.open(lowercase_ ) UpperCAmelCase = raw_image.convert('RGB' ) if self.image_transform is not None: UpperCAmelCase = self.image_transform(lowercase_ ) UpperCAmelCase = extract_label(lowercase_ ) if self.label_to_id is not None: UpperCAmelCase = self.label_to_id[label] return {"image": image, "label": label} def _lowerCAmelCase ( lowercase_ , lowercase_ ): # Initialize accelerator if args.with_tracking: UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase = config['lr'] UpperCAmelCase = int(config['num_epochs'] ) UpperCAmelCase = int(config['seed'] ) UpperCAmelCase = int(config['batch_size'] ) UpperCAmelCase = config['image_size'] if not isinstance(lowercase_ , (list, tuple) ): UpperCAmelCase = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": UpperCAmelCase = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): UpperCAmelCase = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: UpperCAmelCase = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: UpperCAmelCase = os.path.split(lowercase_ )[-1].split('.' )[0] accelerator.init_trackers(lowercase_ , lowercase_ ) # Grab all the image filenames UpperCAmelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences UpperCAmelCase = [extract_label(lowercase_ ) for fname in file_names] UpperCAmelCase = list(set(lowercase_ ) ) id_to_label.sort() UpperCAmelCase = {lbl: i for i, lbl in enumerate(lowercase_ )} # Set the seed before splitting the data. 
np.random.seed(lowercase_ ) torch.manual_seed(lowercase_ ) torch.cuda.manual_seed_all(lowercase_ ) # Split our filenames between train and validation UpperCAmelCase = np.random.permutation(len(lowercase_ ) ) UpperCAmelCase = int(0.8 * len(lowercase_ ) ) UpperCAmelCase = random_perm[:cut] UpperCAmelCase = random_perm[cut:] # For training we use a simple RandomResizedCrop UpperCAmelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] ) UpperCAmelCase = PetsDataset( [file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ ) # For evaluation, we use a deterministic Resize UpperCAmelCase = Compose([Resize(lowercase_ ), ToTensor()] ) UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ ) # Instantiate dataloaders. UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase = create_model('resnet50d' , pretrained=lowercase_ , num_classes=len(lowercase_ ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): UpperCAmelCase = False for param in model.get_classifier().parameters(): UpperCAmelCase = True # We normalize the batches of images to be a bit faster. UpperCAmelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) UpperCAmelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler UpperCAmelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase = 0 # We also need to keep track of the starting epoch so files are named properly UpperCAmelCase = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` UpperCAmelCase = os.path.splitext(lowercase_ )[0] if "epoch" in training_difference: UpperCAmelCase = int(training_difference.replace('epoch_' , '' ) ) + 1 UpperCAmelCase = None else: UpperCAmelCase = int(training_difference.replace('step_' , '' ) ) UpperCAmelCase = resume_step // len(lowercase_ ) resume_step -= starting_epoch * len(lowercase_ ) # Now we train the model for epoch in range(lowercase_ , lowercase_ ): model.train() if args.with_tracking: UpperCAmelCase = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step UpperCAmelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader UpperCAmelCase = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch['image'] - mean) / std UpperCAmelCase = model(lowercase_ ) UpperCAmelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(lowercase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , lowercase_ ) accelerator.save_state(lowercase_ ) model.eval() UpperCAmelCase = 0 UpperCAmelCase = 0 for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch['image'] - mean) / std with torch.no_grad(): UpperCAmelCase = model(lowercase_ ) UpperCAmelCase = outputs.argmax(dim=-1 ) UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['label']) ) UpperCAmelCase = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() UpperCAmelCase = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { 'accuracy': 100 * eval_metric, 'train_loss': total_loss.item() / len(lowercase_ ), 'epoch': epoch, } , step=lowercase_ , ) if checkpointing_steps == "epoch": UpperCAmelCase = F"""epoch_{epoch}""" if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , lowercase_ ) accelerator.save_state(lowercase_ ) if args.with_tracking: accelerator.end_training() def _lowerCAmelCase ( ): UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=lowercase_ , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=lowercase_ , default=lowercase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=lowercase_ , default=lowercase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=lowercase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=lowercase_ , default=lowercase_ , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=lowercase_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
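A quick worked example of the filename-to-label extraction the script relies on; the Oxford-IIIT Pet naming scheme ("<breed>_<index>.jpg") is assumed.

import os
import re


def extract_label(fname):
    # keep only the basename, then strip the trailing "_<index>.jpg"
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


assert extract_label("images/great_pyrenees_123.jpg") == "great_pyrenees"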
78
1
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
364
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def __A ( self ) -> Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) __UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( 
nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() __UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> List[str]: '''simple docstring''' 
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None self.run_pipeline_test(__UpperCAmelCase , [] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = [ f'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = fill_masker.tokenizer __UpperCAmelCase : Union[str, Any] = fill_masker.model __UpperCAmelCase : Tuple = fill_masker( f'This is a {tokenizer.mask_token}' , ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": 
ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker("""This is""" ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = tokenizer.get_vocab() __UpperCAmelCase : Dict = sorted(vocab.keys() )[:2] # Pipeline argument __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) __UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Any = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : 
List[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs] __UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] ) with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) __UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = tokenizer.get_vocab() __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] __UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Dict = fill_masker( f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , )
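The behaviors these tests pin down are easy to reproduce directly; a minimal sketch using the same tiny checkpoint (scores from a tiny random model are meaningless, only the output structure matters).

from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")

# top_k bounds the number of candidates returned per mask
print(unmasker("My name is <mask>", top_k=2))

# targets restricts scoring to a fixed candidate set
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))

# with several masks, one candidate list is returned per mask position
print(unmasker("My name is <mask> <mask>", top_k=2))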
16
0
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: the denoised frames, as a list
    of numpy arrays or as a torch tensor."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
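BaseOutput subclasses behave like both dataclasses and dicts; a small usage sketch, assuming the imports above succeeded (the frame data here is a dummy array).

import numpy as np

out = TextToVideoSDPipelineOutput(frames=[np.zeros((8, 64, 64, 3), dtype=np.uint8)])
print(out.frames[0].shape)     # attribute access: (8, 64, 64, 3)
print(out["frames"][0].dtype)  # dict-style access: uint8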
170
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate a randomly initialized model from a pretrained config and save it with its tokenizer."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
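Because fire exposes the function signature directly, extra config fields become CLI flags; illustrative invocations follow (the paths and the d_model/d_ff overrides are made up).

# From the shell:
#   python save_randomly_initialized_version.py t5-small /tmp/t5-random --d_model=64 --d_ff=128
# or, equivalently, from Python:
model = save_randomly_initialized_version("t5-small", "/tmp/t5-random", d_model=64, d_ff=128)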
170
1
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) UpperCamelCase_ : List[Any] = getLogger(__name__) def __a ( _UpperCamelCase: List[Any] , _UpperCamelCase: str , _UpperCamelCase: str , _UpperCamelCase: int = 8 , _UpperCamelCase: int = 1_024 , _UpperCamelCase: Optional[Any]="val" , _UpperCamelCase: Optional[Any]=None , _UpperCamelCase: Union[str, Any]=False , _UpperCamelCase: Any="summarization" , _UpperCamelCase: int=None , _UpperCamelCase: str=1 , _UpperCamelCase: Dict = None , _UpperCamelCase: Optional[int]="" , **_UpperCamelCase: Optional[int] , ) -> Dict: """simple docstring""" _snake_case = str(_UpperCamelCase ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=_UpperCamelCase ) _snake_case = Path(_UpperCamelCase ) _snake_case = save_dir.joinpath(F"""rank_{local_rank}_output.json""" ) torch.cuda.set_device(_UpperCamelCase ) _snake_case = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).cuda() if fpaa: _snake_case = model.half() # determine if we need to increase num_beams use_task_specific_params(_UpperCamelCase , _UpperCamelCase ) # update config with task specific params _snake_case = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _snake_case = num_return_sequences _snake_case = AutoTokenizer.from_pretrained(_UpperCamelCase ) logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. if max_source_length is None: _snake_case = tokenizer.model_max_length if prefix is None: _snake_case = prefix or getattr(model.config , "prefix" , "" ) or "" _snake_case = SeqaSeqDataset( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , max_target_length=1_024 , type_path=_UpperCamelCase , n_obs=_UpperCamelCase , prefix=_UpperCamelCase , **_UpperCamelCase , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_snake_case = ds.make_sortish_sampler(_UpperCamelCase , distributed=_UpperCamelCase , add_extra_examples=_UpperCamelCase , shuffle=_UpperCamelCase ) _snake_case = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn ) _snake_case = [] for batch in tqdm(_UpperCamelCase ): _snake_case = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=_UpperCamelCase , num_beams=_UpperCamelCase , **_UpperCamelCase , ) _snake_case = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) _snake_case = batch["ids"] if num_return_sequences > 1: _snake_case = chunks(_UpperCamelCase , _UpperCamelCase ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(_UpperCamelCase ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(_UpperCamelCase , _UpperCamelCase ) return results, sampler.num_replicas def __a ( ) -> Tuple: """simple docstring""" _snake_case = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=_UpperCamelCase , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=_UpperCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=_UpperCamelCase , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=_UpperCamelCase , default=_UpperCamelCase ) parser.add_argument( "--type_path" , type=_UpperCamelCase , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=_UpperCamelCase , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=_UpperCamelCase , default=8 , required=_UpperCamelCase , help="batch size" ) parser.add_argument( "--local_rank" , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=_UpperCamelCase , default=1 , required=_UpperCamelCase , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=_UpperCamelCase , default=600 , required=_UpperCamelCase , help="How long should master process wait for other processes to finish." 
, ) parser.add_argument("--src_lang" , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase ) parser.add_argument("--tgt_lang" , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase ) parser.add_argument( "--prefix" , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) _snake_case = time.time() _snake_case , _snake_case = parser.parse_known_args() _snake_case = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase ) if generate_kwargs and args.local_rank <= 0: print(F"""parsed the following generate kwargs: {generate_kwargs}""" ) _snake_case = Path(args.save_dir + "_tmp" ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) # this handles locking. _snake_case = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"""Found files at {json_save_dir} please move or remove them.""" ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _snake_case = {} if args.src_lang is not None: _snake_case = args.src_lang if args.tgt_lang is not None: _snake_case = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=_UpperCamelCase ) _snake_case , _snake_case = eval_data_dir( args.data_dir , _UpperCamelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_UpperCamelCase , **_UpperCamelCase , ) if args.local_rank <= 0: _snake_case = Path(args.save_dir ) save_dir.mkdir(exist_ok=_UpperCamelCase ) _snake_case = gather_results_from_each_node(_UpperCamelCase , _UpperCamelCase , args.sync_timeout ) _snake_case = combine_partial_results(_UpperCamelCase ) if args.num_return_sequences > 1: _snake_case = save_dir.joinpath("pseudolabel_results.json" ) print(F"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" ) save_json(_UpperCamelCase , _UpperCamelCase ) return _snake_case = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(_UpperCamelCase ) as f: _snake_case = [x.rstrip() for x in f.readlines()][: len(_UpperCamelCase )] # Calculate metrics, save metrics, and save _generations.txt _snake_case = "translation" in args.task _snake_case = calculate_bleu if calc_bleu else calculate_rouge _snake_case = "bleu" if calc_bleu else "rouge" _snake_case = score_fn(_UpperCamelCase , _UpperCamelCase ) _snake_case = len(_UpperCamelCase ) _snake_case = time.time() - start_time _snake_case = round(runtime / metrics["n_obs"] , 4 ) _snake_case = num_replicas # TODO(@stas00): add whatever metadata to metrics _snake_case = save_dir.joinpath(F"""{args.type_path}_{metric_name}.json""" ) save_json(_UpperCamelCase , _UpperCamelCase , indent=_UpperCamelCase ) print(_UpperCamelCase ) write_txt_file(_UpperCamelCase , save_dir.joinpath(F"""{args.type_path}_generations.txt""" ) ) if args.debug: write_txt_file(_UpperCamelCase , save_dir.joinpath(F"""{args.type_path}.target""" ) ) else: shutil.rmtree(_UpperCamelCase ) def __a ( _UpperCamelCase: Dict ) -> List: """simple docstring""" _snake_case = [] for partial_result in partial_results: records.extend(_UpperCamelCase ) _snake_case = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x["id"] ) _snake_case = [x["pred"] for x 
in records] return preds def __a ( _UpperCamelCase: List[Any] , _UpperCamelCase: str , _UpperCamelCase: int ) -> List[Dict[str, List]]: """simple docstring""" _snake_case = time.time() logger.info("waiting for all nodes to finish" ) _snake_case = None while (time.time() - start_wait) < timeout: _snake_case = list(save_dir.glob("rank_*.json" ) ) if len(_UpperCamelCase ) < num_replicas: continue try: # make sure all json files are fully saved _snake_case = lmap(_UpperCamelCase , _UpperCamelCase ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
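A sketch of the merge step rank 0 performs: each rank writes rank_{i}_output.json into the temporary save dir, and the shards are concatenated and re-sorted by example id. The directory name here is illustrative.

import json
from pathlib import Path

shards = [json.loads(p.read_text()) for p in sorted(Path("tmp_gen_tmp").glob("rank_*.json"))]
records = [rec for shard in shards for rec in shard]
records.sort(key=lambda x: x["id"])  # restore original dataset order
preds = [x["pred"] for x in records]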
142
'''simple docstring''' from manim import * class _a ( __lowerCAmelCase ): def _lowercase ( self ) -> Optional[int]: _snake_case = Rectangle(height=0.5 ,width=0.5 ) _snake_case = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = VGroup(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("CPU" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_SCREAMING_SNAKE_CASE ) _snake_case = [mem.copy() for i in range(4 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("GPU" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE ) gpu.move_to([-1, -1, 0] ) self.add(_SCREAMING_SNAKE_CASE ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("Model" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE ) model.move_to([3, -1.0, 0] ) self.add(_SCREAMING_SNAKE_CASE ) _snake_case = [] for i, rect in enumerate(_SCREAMING_SNAKE_CASE ): rect.set_stroke(_SCREAMING_SNAKE_CASE ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) _snake_case = Rectangle(height=0.4_6 / 4 ,width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=_SCREAMING_SNAKE_CASE ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 ) self.add(_SCREAMING_SNAKE_CASE ) cpu_targs.append(_SCREAMING_SNAKE_CASE ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("Loaded Checkpoint" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,aligned_edge=_SCREAMING_SNAKE_CASE ,buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) _snake_case = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _snake_case = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) self.add(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) _snake_case = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,) blue_text.next_to(_SCREAMING_SNAKE_CASE ,DOWN * 2.4 ,aligned_edge=key_text.get_left() ) _snake_case = MarkupText( f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" ,font_size=24 ,) step_a.move_to([2, 2, 0] ) self.play(Write(_SCREAMING_SNAKE_CASE ) ,Write(_SCREAMING_SNAKE_CASE ) ) self.play(Write(_SCREAMING_SNAKE_CASE ,run_time=1 ) 
,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ) _snake_case = [] _snake_case = [] for i, rect in enumerate(_SCREAMING_SNAKE_CASE ): _snake_case = fill.copy().set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 ) target.move_to(_SCREAMING_SNAKE_CASE ) first_animations.append(GrowFromCenter(_SCREAMING_SNAKE_CASE ,run_time=1 ) ) _snake_case = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE ,run_time=1.5 ) ) self.play(*_SCREAMING_SNAKE_CASE ) self.play(*_SCREAMING_SNAKE_CASE ) self.wait()
142
1
"""simple docstring""" def lowercase ( lowerCAmelCase__ : int ) -> str: if number > 0: raise ValueError('''input must be a negative integer''' ) __a = len(bin(lowerCAmelCase__ )[3:] ) __a = bin(abs(lowerCAmelCase__ ) - (1 << binary_number_length) )[3:] __a = ( ( '''1''' + '''0''' * (binary_number_length - len(lowerCAmelCase__ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
45
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case : int = logging.get_logger(__name__) _snake_case : Union[str, Any] = "▁" _snake_case : Any = {"vocab_file": "prophetnet.tokenizer"} _snake_case : Tuple = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } _snake_case : Any = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } _snake_case : Any = { "microsoft/xprophetnet-large-wiki100-cased": 512, } def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Union[str, Any] = collections.OrderedDict() with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader: __snake_case : Dict = reader.readlines() for index, token in enumerate(__lowerCamelCase ): __snake_case : Optional[int] = token.rstrip("\n" ) __snake_case : Union[str, Any] = index return vocab class a (_lowerCAmelCase ): """simple docstring""" __UpperCAmelCase : List[str] = VOCAB_FILES_NAMES __UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Any = ["input_ids", "attention_mask"] def __init__( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[Any]="[SEP]" , lowerCamelCase : List[str]="[SEP]" , lowerCamelCase : Dict="[SEP]" , lowerCamelCase : Optional[int]="[UNK]" , lowerCamelCase : Dict="[PAD]" , lowerCamelCase : str="[CLS]" , lowerCamelCase : str="[MASK]" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : int , ) -> None: __snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise __snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) __snake_case : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __snake_case : List[str] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10 ): __snake_case : List[str] = F'[unused{i}]' __snake_case : int = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __snake_case : Any = 12 __snake_case : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(lowerCamelCase ) def __getstate__( self : Union[str, Any] ) -> Union[str, Any]: __snake_case : Union[str, Any] = self.__dict__.copy() __snake_case : int = None return state def __setstate__( self : Tuple , lowerCamelCase : List[str] ) -> Tuple: __snake_case : Tuple = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case : Tuple = {} __snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __snake_case ( self : Dict , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return ([0] * len(lowerCamelCase )) + [1] return ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) + [1] def __snake_case ( self : Any , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: __snake_case : str = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case ( self : str ) -> str: return len(self.sp_model ) + self.fairseq_offset def __snake_case ( self : Any ) -> int: __snake_case : Dict = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __snake_case ( self : str , lowerCamelCase : str ) -> str: return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def __snake_case ( self : List[str] , lowerCamelCase : Dict ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __snake_case : List[Any] = self.sp_model.PieceToId(lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __snake_case ( self : Dict , lowerCamelCase : Optional[int] ) -> Optional[Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __snake_case ( self : int , lowerCamelCase : Union[str, Any] ) -> Optional[Any]: __snake_case : str = "".join(lowerCamelCase ).replace(lowerCamelCase , " " ).strip() return out_string def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return __snake_case : List[str] = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + 
VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , "wb" ) as fi: __snake_case : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) return (out_vocab_file,) def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] __snake_case : List[str] = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
123
0
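The tokenizer above keeps the fairseq special tokens and ten [unused] slots at the bottom of the embedding matrix and shifts every SentencePiece id by a fixed offset. A minimal sketch of that bookkeeping, with a toy dict standing in for the SentencePiece model (the vocab values here are illustrative, not the real prophetnet.tokenizer):

# Sketch of the fairseq/SentencePiece id-offset scheme; toy_spm is a stand-in vocab.
special_tokens = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
special_tokens.update({f"[unused{i}]": 5 + i for i in range(10)})  # ids 5..14
FAIRSEQ_OFFSET = 12  # spm id 3 (the first real piece, ",") lands at embedding id 15

toy_spm = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}

def token_to_id(token: str) -> int:
    if token in special_tokens:
        return special_tokens[token]
    spm_id = toy_spm.get(token, 0)
    # spm id 0 is <unk>: route it to the fairseq [UNK] instead of shifting it
    return spm_id + FAIRSEQ_OFFSET if spm_id else special_tokens["[UNK]"]

assert token_to_id(",") == 15 and token_to_id("[MASK]") == 4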
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer UpperCAmelCase__ = flax_key_tuple[:-1] + ("""weight""",) UpperCAmelCase__ = torch.permute(SCREAMING_SNAKE_CASE__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE__ ): # linear layer UpperCAmelCase__ = flax_key_tuple[:-1] + ("""weight""",) UpperCAmelCase__ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: UpperCAmelCase__ = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' if "metadata" in layer: UpperCAmelCase__ = layer.split("""metadata""" ) UpperCAmelCase__ = """""".join(split_layer[0] )[:-1] UpperCAmelCase__ = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: UpperCAmelCase__ = layer.split("""kvstore""" ) UpperCAmelCase__ = """""".join(split_layer[0] )[:-1] UpperCAmelCase__ = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: UpperCAmelCase__ = layer.split("""/""" ) UpperCAmelCase__ = """/""".join(split_layer[:-1] ) UpperCAmelCase__ = (split_layer[-1],) if "kvstore/path" in layer: UpperCAmelCase__ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: UpperCAmelCase__ = """file""" else: UpperCAmelCase__ = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = rename_keys(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = {} for k, v in current_block.items(): UpperCAmelCase__ = v UpperCAmelCase__ = new_current_block torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str = WEIGHTS_NAME ): '''simple docstring''' UpperCAmelCase__ = convert_file_size_to_int(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = [] UpperCAmelCase__ = {} UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: UpperCAmelCase__ = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] UpperCAmelCase__ = flatten_dict(SCREAMING_SNAKE_CASE__ , sep="""/""" ) UpperCAmelCase__ = {} for layer in checkpoint_info.keys(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = get_key_and_tensorstore_dict( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if curr_real_layer_name in all_layers: UpperCAmelCase__ = content else: UpperCAmelCase__ = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file 
UpperCAmelCase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() UpperCAmelCase__ = torch.tensor(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts UpperCAmelCase__ , UpperCAmelCase__ = rename_base_flax_keys(tuple(key.split("""/""" ) ) , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = """/""".join(SCREAMING_SNAKE_CASE__ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: UpperCAmelCase__ = os.path.join( SCREAMING_SNAKE_CASE__ , weights_name.replace(""".bin""" , F'''-{len(SCREAMING_SNAKE_CASE__ )+1:05d}-of-???.bin''' ) ) rename_and_save_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) sharded_state_dicts.append(current_block.keys() ) del current_block UpperCAmelCase__ = {} UpperCAmelCase__ = 0 UpperCAmelCase__ = raw_weights.to(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , weights_name.replace(""".bin""" , F'''-{len(SCREAMING_SNAKE_CASE__ )+1:05d}-of-???.bin''' ) ) rename_and_save_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(SCREAMING_SNAKE_CASE__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index UpperCAmelCase__ = {} UpperCAmelCase__ = {} for idx, shard in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = weights_name.replace( """.bin""" , F'''-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE__ ):05d}.bin''' ) # len(sharded_state_dicts):05d} UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase__ = shard for key in shard: UpperCAmelCase__ = shard_file # Add the metadata UpperCAmelCase__ = {"""total_size""": total_size} UpperCAmelCase__ = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase__ = json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ ) + """\n""" f.write(SCREAMING_SNAKE_CASE__ ) return metadata, index if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. 
Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) UpperCAmelCase_ = parser.parse_args() shard_on_the_fly( args.switch_t5x_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def _UpperCamelCase ( ): '''simple docstring''' from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer UpperCAmelCase__ = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) UpperCAmelCase__ = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) UpperCAmelCase__ = TaTokenizer.from_pretrained("""t5-small""" ) UpperCAmelCase__ = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" UpperCAmelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).input_ids UpperCAmelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
359
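The sharding loop in the script above is a greedy bin-fill: tensors are appended to the current shard until the next one would push it past max_shard_size, at which point the shard is flushed. The same logic on plain (name, byte-count) pairs, with illustrative sizes:

def shard_greedily(named_sizes, max_shard_size):
    # Mirror of the size check above: flush when the next tensor would overflow.
    shards, current, current_size = [], {}, 0
    for name, nbytes in named_sizes:
        if current and current_size + nbytes > max_shard_size:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = nbytes
        current_size += nbytes
    if current:
        shards.append(current)  # the last, possibly underfull shard
    return shards

weights = [("w1", 6), ("w2", 5), ("w3", 4), ("w4", 8)]
print(shard_greedily(weights, max_shard_size=10))
# [{'w1': 6}, {'w2': 5, 'w3': 4}, {'w4': 8}]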
'''simple docstring''' UpperCAmelCase_ = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 100000] number //= 100000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution UpperCAmelCase_ = [None] * 1_0_0_0_0_0_0_0 UpperCAmelCase_ = True UpperCAmelCase_ = False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore UpperCAmelCase__ = chain(next_number(SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase__ = number_chain while number < 10000000: UpperCAmelCase__ = number_chain number *= 10 return number_chain def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 10000000 ): '''simple docstring''' for i in range(1 , SCREAMING_SNAKE_CASE__ ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod() print(f"{solution() = }")
61
0
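The snippet above is Project Euler 92: every chain of repeated digit-square sums ends at 1 or 89, so memoising the terminal value of the small intermediate sums is enough. A compact, unoptimised restatement:

import functools

def next_number(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))  # sum of squared digits

@functools.lru_cache(maxsize=None)
def terminal(n: int) -> int:
    # Every chain provably reaches 1 or 89 and then repeats.
    return n if n in (1, 89) else terminal(next_number(n))

# Any n < 10**7 drops below 7 * 81 = 567 after one step, so only small values get cached.
print(sum(1 for i in range(1, 10_000_000) if terminal(next_number(i)) == 89))  # 8581146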
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A__ ( enum.Enum): A_ : List[Any] = 0 A_ : Dict = 1 A_ : Union[str, Any] = 2 @add_end_docstrings(_lowerCamelCase) class A__ ( _lowerCamelCase): A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowerCAmelCase : Any = None if self.model.config.prefix is not None: __lowerCAmelCase : str = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowerCAmelCase : Tuple = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params ) __lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params} __lowerCAmelCase : List[str] = {**self._forward_params, **forward_params} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Optional[int] = {} if prefix is not None: __lowerCAmelCase : Union[str, Any] = prefix if prefix: __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ' [None, \'hole\']' ) __lowerCAmelCase : int = handle_long_generation preprocess_params.update(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = generate_kwargs __lowerCAmelCase : List[Any] = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : List[Any] = ReturnType.TENSORS if return_type is not None: __lowerCAmelCase : Optional[Any] = return_type if clean_up_tokenization_spaces is not None: __lowerCAmelCase : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: __lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowerCAmelCase : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = self.tokenizer( prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : Optional[Any] = prompt_text if handle_long_generation == "hole": __lowerCAmelCase : str = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens'] else: __lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:] return inputs def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = model_inputs['input_ids'] __lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE ) # Allow empty prompts if input_ids.shape[1] == 0: __lowerCAmelCase : Dict = None __lowerCAmelCase : str = None __lowerCAmelCase : Tuple = 1 else: __lowerCAmelCase : Any = input_ids.shape[0] __lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = generated_sequence.shape[0] if self.framework == "pt": __lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ): __lowerCAmelCase : Any = model_outputs['generated_sequence'][0] __lowerCAmelCase : Tuple = model_outputs['input_ids'] __lowerCAmelCase : Any = model_outputs['prompt_text'] __lowerCAmelCase : int = generated_sequence.numpy().tolist() __lowerCAmelCase : Union[str, Any] = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowerCAmelCase : int = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowerCAmelCase : Any = self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowerCAmelCase : Optional[Any] = 0 else: __lowerCAmelCase : Any = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) ) if return_type == ReturnType.FULL_TEXT: __lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:] else: __lowerCAmelCase : int = text[prompt_length:] __lowerCAmelCase : Dict = {'generated_text': all_text} records.append(_SCREAMING_SNAKE_CASE ) return records
86
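The handle_long_generation="hole" branch above keeps only as much prompt as still leaves room for the requested new tokens. The same arithmetic on a plain list, outside the pipeline API:

def trim_for_generation(input_ids, max_new_tokens, model_max_length):
    # Drop the oldest prompt tokens so prompt + new tokens fit the context window.
    if len(input_ids) + max_new_tokens <= model_max_length:
        return input_ids
    keep = model_max_length - max_new_tokens
    if keep <= 0:
        raise ValueError("requested more new tokens than the model context allows")
    return input_ids[-keep:]  # keep the most recent context, like "hole"

print(trim_for_generation(list(range(12)), max_new_tokens=5, model_max_length=10))
# [7, 8, 9, 10, 11]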
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict: A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase ) A_ : List[str] = nn.functional.normalize(_lowerCAmelCase ) return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self :int , snake_case :CLIPConfig ): '''simple docstring''' super().__init__(snake_case ) A_ : int = CLIPVisionModel(config.vision_config ) A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case ) A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case ) A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case ) A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case ) A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ): '''simple docstring''' A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output A_ : List[Any] = self.visual_projection(snake_case ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy() A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy() A_ : Union[str, Any] = [] A_ : Any = image_embeds.shape[0] for i in range(snake_case ): A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A_ : Optional[Any] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A_ : Optional[Any] = special_cos_dist[i][concept_idx] A_ : Tuple = self.special_care_embeds_weights[concept_idx].item() A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} ) A_ : Any = 0.01 for concept_idx in range(len(cos_dist[0] ) ): A_ : Tuple = cos_dist[i][concept_idx] A_ : Tuple = self.concept_embeds_weights[concept_idx].item() A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(snake_case ) result.append(snake_case ) A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ): '''simple docstring''' A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output A_ : int = self.visual_projection(snake_case ) A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds ) A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A_ : Optional[Any] = 0.0 A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment # 
special_scores = special_scores.round(decimals=3) A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) A_ : Optional[Any] = special_care * 0.01 A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
300
0
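Stripped of the CLIP plumbing, the safety checker above is: L2-normalise image and concept embeddings, take their similarity matrix, and flag any image whose score clears a per-concept threshold. A NumPy sketch with random embeddings and made-up thresholds:

import numpy as np

rng = np.random.default_rng(0)
image_embeds = rng.normal(size=(4, 8))    # 4 images, 8-dim embeddings (toy data)
concept_embeds = rng.normal(size=(3, 8))  # 3 concepts
thresholds = np.array([0.5, 0.6, 0.7])    # per-concept cutoffs (illustrative)

def unit(x):
    return x / np.linalg.norm(x, axis=-1, keepdims=True)

scores = unit(image_embeds) @ unit(concept_embeds).T  # cosine similarity, as above
adjustment = 0.0  # raise to filter more aggressively, per the comments above
flagged = np.any(scores - thresholds + adjustment > 0, axis=1)
print(flagged)  # one boolean per image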
"""simple docstring""" from __future__ import annotations def _lowercase ( __lowerCAmelCase ) -> list[int]: # This function is recursive SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(__lowerCAmelCase ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else SCREAMING_SNAKE_CASE__ : List[Any] = array[0] SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : list[int] = [] while not is_found and i < array_length: if array[i] < pivot: SCREAMING_SNAKE_CASE__ : Tuple = True SCREAMING_SNAKE_CASE__ : str = [element for element in array[i:] if element >= array[i]] SCREAMING_SNAKE_CASE__ : Union[str, Any] = longest_subsequence(__lowerCAmelCase ) if len(__lowerCAmelCase ) > len(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = temp_array else: i += 1 SCREAMING_SNAKE_CASE__ : Any = [element for element in array[1:] if element >= pivot] SCREAMING_SNAKE_CASE__ : str = [pivot, *longest_subsequence(__lowerCAmelCase )] if len(__lowerCAmelCase ) > len(__lowerCAmelCase ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
56
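The recursive pivot scheme above computes a longest non-decreasing subsequence; the textbook alternative is an O(n^2) DP with parent pointers, sketched here:

def longest_non_decreasing(array):
    if not array:
        return []
    length = [1] * len(array)   # length[i]: best subsequence ending at i
    parent = [-1] * len(array)  # predecessor index, for reconstruction
    for i in range(1, len(array)):
        for j in range(i):
            if array[j] <= array[i] and length[j] + 1 > length[i]:
                length[i] = length[j] + 1
                parent[i] = j
    i = max(range(len(array)), key=length.__getitem__)
    out = []
    while i != -1:
        out.append(array[i])
        i = parent[i]
    return out[::-1]

print(longest_non_decreasing([4, 8, 7, 5, 1, 12, 2, 10, 3]))  # [4, 8, 12]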
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class __a (tf.keras.optimizers.schedules.LearningRateSchedule): '''simple docstring''' def __init__( self , _a , _a , _a , _a = 1.0 , _a = None , ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Any] = initial_learning_rate SCREAMING_SNAKE_CASE__ : Tuple = warmup_steps SCREAMING_SNAKE_CASE__ : Optional[Any] = power SCREAMING_SNAKE_CASE__ : Optional[Any] = decay_schedule_fn SCREAMING_SNAKE_CASE__ : Any = name def __call__( self , _a ) -> List[Any]: """simple docstring""" with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(_a , tf.floataa ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa ) SCREAMING_SNAKE_CASE__ : str = global_step_float / warmup_steps_float SCREAMING_SNAKE_CASE__ : Optional[int] = self.initial_learning_rate * tf.math.pow(_a , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_a , ) def _a ( self ) -> List[Any]: """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 0.9 , __lowerCAmelCase = 0.999 , __lowerCAmelCase = 1E-8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = None , ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCAmelCase , ) if num_warmup_steps: SCREAMING_SNAKE_CASE__ : Dict = WarmUp( initial_learning_rate=__lowerCAmelCase , decay_schedule_fn=__lowerCAmelCase , warmup_steps=__lowerCAmelCase , ) if weight_decay_rate > 0.0: SCREAMING_SNAKE_CASE__ : int = AdamWeightDecay( learning_rate=__lowerCAmelCase , weight_decay_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=__lowerCAmelCase , ) else: SCREAMING_SNAKE_CASE__ : int = tf.keras.optimizers.Adam( learning_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class __a (UpperCamelCase_): '''simple docstring''' def __init__( self , _a = 0.001 , _a = 0.9 , _a = 0.999 , _a = 1E-7 , _a = False , _a = 0.0 , _a = None , _a = None , _a = "AdamWeightDecay" , **_a , ) -> Union[str, Any]: """simple docstring""" super().__init__(_a , _a , _a , _a , _a , _a , **_a ) SCREAMING_SNAKE_CASE__ : Tuple = weight_decay_rate SCREAMING_SNAKE_CASE__ : Tuple = include_in_weight_decay SCREAMING_SNAKE_CASE__ : Dict = exclude_from_weight_decay @classmethod def _a ( cls , _a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = {"""WarmUp""": WarmUp} return super(_a , cls ).from_config(_a , custom_objects=_a ) def _a ( self , _a , _a , _a ) -> str: """simple docstring""" super(_a , self )._prepare_local(_a , _a , _a ) SCREAMING_SNAKE_CASE__ : Tuple = tf.constant( self.weight_decay_rate , name="""adam_weight_decay_rate""" ) def _a ( self , _a , _a , _a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , ) return tf.no_op() def _a ( self , _a , _a=None , **_a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = list(zip(*_a ) ) return super(_a , self ).apply_gradients(zip(_a , _a ) , name=_a , **_a ) def _a ( self , _a , _a , _a ) -> str: """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} SCREAMING_SNAKE_CASE__ : Dict = apply_state or {} SCREAMING_SNAKE_CASE__ : List[str] = apply_state.get((var_device, var_dtype) ) if coefficients is None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._fallback_apply_state(_a , _a ) SCREAMING_SNAKE_CASE__ : List[Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def _a ( self , _a , _a , _a=None ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , _a ) SCREAMING_SNAKE_CASE__ : Any = self._decay_weights_op(_a , _a , _a ) with tf.control_dependencies([decay] ): return super(_a , self )._resource_apply_dense(_a , _a , **_a ) def _a ( self , _a , _a , _a , _a=None ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self._get_lr(var.device , var.dtype.base_dtype , _a ) SCREAMING_SNAKE_CASE__ : Dict = self._decay_weights_op(_a , _a , _a ) with tf.control_dependencies([decay] ): return super(_a , self )._resource_apply_sparse(_a , _a , _a , **_a ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def _a ( self , _a ) -> Tuple: """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(_a , _a ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(_a , _a ) is not None: return False return True class __a (UpperCamelCase_): '''simple docstring''' def __init__( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = [] SCREAMING_SNAKE_CASE__ : List[str] = None @property def _a ( self ) -> str: """simple docstring""" if self._accum_steps is None: SCREAMING_SNAKE_CASE__ : Dict = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , 
trainable=_a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def _a ( self ) -> List[str]: """simple docstring""" if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , _a ) -> str: """simple docstring""" if not self._gradients: SCREAMING_SNAKE_CASE__ : Optional[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(_a ) , trainable=_a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(_a ) != len(self._gradients ): raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(_a )}''' ) for accum_gradient, gradient in zip(self._gradients , _a ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(_a ) self._accum_steps.assign_add(1 ) def _a ( self ) -> Any: """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(_a ) )
56
1
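The schedule above is linear warmup into polynomial decay. The same curve as a plain function of the step, with illustrative constants and no TensorFlow:

def lr_at(step, init_lr=3e-4, warmup_steps=100, total_steps=1000, end_lr=0.0, power=1.0):
    # Warmup: scale init_lr by (step / warmup_steps) ** power, as in WarmUp above.
    if step < warmup_steps:
        return init_lr * (step / warmup_steps) ** power
    # Then polynomial decay from init_lr to end_lr over the remaining steps.
    decay_steps = total_steps - warmup_steps
    progress = min(step - warmup_steps, decay_steps) / decay_steps
    return (init_lr - end_lr) * (1 - progress) ** power + end_lr

print(lr_at(50), lr_at(100), lr_at(550), lr_at(1000))
# 0.00015 0.0003 0.00015 0.0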
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCamelCase : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCamelCase : str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCamelCase : List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__UpperCAmelCase , hypotheses=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase ) }
316
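GLEU as the docstring above defines it: count all 1..4-gram matches between hypothesis and reference, compute precision and recall over the respective totals, and take the minimum. A single-reference sketch that follows that description rather than nltk's exact implementation:

from collections import Counter

def ngrams(tokens, min_len=1, max_len=4):
    return Counter(
        tuple(tokens[i : i + n])
        for n in range(min_len, max_len + 1)
        for i in range(len(tokens) - n + 1)
    )

def sentence_gleu(reference, hypothesis, min_len=1, max_len=4):
    ref, hyp = ngrams(reference, min_len, max_len), ngrams(hypothesis, min_len, max_len)
    overlap = sum((ref & hyp).values())  # clipped n-gram matches
    precision = overlap / max(sum(hyp.values()), 1)
    recall = overlap / max(sum(ref.values()), 1)
    return min(precision, recall)

print(sentence_gleu("the cat sat".split(), "the cat sat on".split()))  # 0.6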
"""simple docstring""" def A ( snake_case :int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __UpperCamelCase = gray_code_sequence_string(snake_case ) # # convert them to integers for i in range(len(snake_case ) ): __UpperCamelCase = int(sequence[i] , 2 ) return sequence def A ( snake_case :int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __UpperCamelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __UpperCamelCase = gray_code_sequence_string(bit_count - 1 ) __UpperCamelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __UpperCamelCase = '0' + smaller_sequence[i] sequence.append(snake_case ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __UpperCamelCase = '1' + smaller_sequence[i] sequence.append(snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
316
1
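The reflect-and-prefix recursion above matches the closed form g(i) = i XOR (i >> 1); one comprehension gives the same sequence:

def gray_code(bit_count: int) -> list[int]:
    # n-bit Gray code: consecutive values differ in exactly one bit.
    return [i ^ (i >> 1) for i in range(1 << bit_count)]

print(gray_code(3))  # [0, 1, 3, 2, 6, 7, 5, 4], same order as the recursion above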
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a :Optional[Any] = logging.get_logger(__name__) __a :Optional[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __a :List[str] = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __a :List[str] = {'facebook/blenderbot_small-90M': 512} def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ = set() A_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A_ = char A_ = set(__UpperCamelCase ) return pairs class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES _lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any="__start__" , UpperCAmelCase : Any="__end__" , UpperCAmelCase : Tuple="__unk__" , UpperCAmelCase : int="__null__" , **UpperCAmelCase : Dict , ): super().__init__(unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , pad_token=UpperCAmelCase , **UpperCAmelCase ) with open(UpperCAmelCase , encoding="utf-8" ) as vocab_handle: A_ = json.load(UpperCAmelCase ) A_ = {v: k for k, v in self.encoder.items()} with open(UpperCAmelCase , encoding="utf-8" ) as merges_handle: A_ = merges_handle.read().split("\n" )[1:-1] A_ = [tuple(merge.split() ) for merge in merges] A_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) A_ = {} @property def __A ( self : Tuple ): return len(self.encoder ) def __A ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : str , UpperCAmelCase : str ): if token in self.cache: return self.cache[token] A_ = re.sub("([.,!?()])" , R" \1" , UpperCAmelCase ) A_ = re.sub("(')" , R" \1 " , UpperCAmelCase ) A_ = re.sub(R"\s{2,}" , " " , UpperCAmelCase ) if "\n" in token: A_ = token.replace("\n" , " __newln__" ) A_ = token.split(" " ) A_ = [] for token in tokens: if not len(UpperCAmelCase ): continue A_ = token.lower() A_ = tuple(UpperCAmelCase ) A_ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) A_ = get_pairs(UpperCAmelCase ) if not pairs: words.append(UpperCAmelCase ) continue while True: A_ = min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break A_ , A_ = bigram A_ = [] A_ = 0 while i < len(UpperCAmelCase ): try: A_ = word.index(UpperCAmelCase , UpperCAmelCase ) new_word.extend(word[i:j] ) A_ = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A_ = tuple(UpperCAmelCase ) A_ = new_word if len(UpperCAmelCase ) == 1: break else: A_ = get_pairs(UpperCAmelCase ) A_ = "@@ ".join(UpperCAmelCase ) A_ = word[:-4] A_ = word words.append(UpperCAmelCase ) 
return " ".join(UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : str ): A_ = [] A_ = re.findall(R"\S+\n?" , UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase ).split(" " ) ) ) return split_tokens def __A ( self : Any , UpperCAmelCase : str ): A_ = token.lower() return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) ) def __A ( self : int , UpperCAmelCase : int ): return self.decoder.get(UpperCAmelCase , self.unk_token ) def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] ): A_ = " ".join(UpperCAmelCase ).replace("@@ " , "" ).strip() return out_string def __A ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A_ = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + "\n" ) A_ = 0 with open(UpperCAmelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) A_ = token_index writer.write(" ".join(UpperCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file
329
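The heart of the tokenizer above is the classic BPE loop: repeatedly merge the adjacent symbol pair with the lowest merge rank until no known pair remains. A stripped-down sketch with a toy merge table (the </w> word-end marker and caching are omitted):

def bpe(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no mergeable pair left
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])  # apply the merge
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

toy_ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2, ("hell", "o"): 3}
print(bpe("hello", toy_ranks))  # ['hello']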
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : """simple docstring""" @staticmethod def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ): pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): """simple docstring""" @require_torch def __A ( self : List[str] ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCAmelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @require_tf def __A ( self : int ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @slow @require_torch def __A ( self : Any ): A_ = pipeline( 
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def __A ( self : Optional[Any] ): A_ = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
329
1
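The pipeline under test reduces to: embed the image and each candidate label with the two CLIP towers, then softmax the scaled cosine similarities. A NumPy sketch with random vectors standing in for the model outputs:

import numpy as np

rng = np.random.default_rng(1)
image_embed = rng.normal(size=8)        # stand-in for the image tower output
label_embeds = rng.normal(size=(3, 8))  # stand-ins for the text tower outputs
labels = ["cat", "plane", "remote"]

def unit(x):
    return x / np.linalg.norm(x, axis=-1, keepdims=True)

logits = 100.0 * unit(label_embeds) @ unit(image_embed)  # CLIP-style logit scale
probs = np.exp(logits - logits.max())
probs /= probs.sum()
for label, p in sorted(zip(labels, probs), key=lambda t: -t[1]):
    print(f"{label}: {p:.3f}")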
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer SCREAMING_SNAKE_CASE :Optional[Any] = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast SCREAMING_SNAKE_CASE :Dict = TaTokenizerFast SCREAMING_SNAKE_CASE :Tuple = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :List[Any] = [ '''MT5EncoderModel''', '''MT5ForConditionalGeneration''', '''MT5ForQuestionAnswering''', '''MT5Model''', '''MT5PreTrainedModel''', '''MT5Stack''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :List[Any] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :int = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model'''] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys SCREAMING_SNAKE_CASE :int = _LazyModule( __name__, globals()['''__file__'''], _import_structure, extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast}, module_spec=__spec__, )
159
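The _LazyModule indirection above defers heavy imports until an attribute is first accessed. The standard-library equivalent is a module-level __getattr__ (PEP 562); a minimal sketch for a hypothetical package with the same layout:

# mypkg/__init__.py -- hypothetical; assumes the named submodules exist
import importlib

_LAZY = {
    "MT5Model": ".modeling_mt5",       # attribute -> submodule that defines it
    "MT5Config": ".configuration_mt5",
}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)  # import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    return sorted(list(globals()) + list(_LAZY))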
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTVaConfig,
    MobileViTVaForImageClassification,
    MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)


def _lowerCAmelCase ( lowerCAmelCase_ :List[Any] )->int:
    '''simple docstring'''
    print("Loading config file..." )

    def flatten_yaml_as_dict(lowerCAmelCase_ :List[Any] , lowerCAmelCase_ :Optional[int]="" , lowerCAmelCase_ :int="." ):
        snake_case_ = []
        for k, v in d.items():
            snake_case_ = parent_key + sep + k if parent_key else k
            if isinstance(lowerCAmelCase_ , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(lowerCAmelCase_ , lowerCAmelCase_ , sep=lowerCAmelCase_ ).items() )
            else:
                items.append((new_key, v) )
        return dict(lowerCAmelCase_ )

    snake_case_ = argparse.Namespace()
    with open(lowerCAmelCase_ , "r" ) as yaml_file:
        try:
            snake_case_ = yaml.load(lowerCAmelCase_ , Loader=yaml.FullLoader )

            snake_case_ = flatten_yaml_as_dict(lowerCAmelCase_ )
            for k, v in flat_cfg.items():
                setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(lowerCAmelCase_ , str(lowerCAmelCase_ ) ) )
    return config


def _lowerCAmelCase ( lowerCAmelCase_ :List[str] , lowerCAmelCase_ :Tuple )->Union[str, Any]:
    '''simple docstring'''
    snake_case_ = MobileViTVaConfig()

    snake_case_ = False

    # dataset
    if task_name.startswith("imagenet1k_" ):
        snake_case_ = 1_000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            snake_case_ = 384
        else:
            snake_case_ = 256
        snake_case_ = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_" ):
        snake_case_ = 21_000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            snake_case_ = 384
        else:
            snake_case_ = 256
        snake_case_ = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_" ):
        snake_case_ = 151
        snake_case_ = 512
        snake_case_ = "ade20k-id2label.json"
        snake_case_ = True
    elif task_name.startswith("voc_" ):
        snake_case_ = 21
        snake_case_ = 512
        snake_case_ = "pascal-voc-id2label.json"
        snake_case_ = True

    # orig_config
    snake_case_ = load_orig_config_file(lowerCAmelCase_ )
    assert getattr(lowerCAmelCase_ , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
    snake_case_ = getattr(lowerCAmelCase_ , "model.classification.mitv2.width_multiplier" , 1.0 )
    assert (
        getattr(lowerCAmelCase_ , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    snake_case_ = getattr(lowerCAmelCase_ , "model.classification.activation.name" , "swish" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.output_stride" , 16 )
        if "_deeplabv3" in task_name:
            snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
            snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
            snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )

    # id2label
    snake_case_ = "huggingface/label-files"
    snake_case_ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
    snake_case_ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
    snake_case_ = idalabel
    snake_case_ = {v: k for k, v in idalabel.items()}

    return config


def _lowerCAmelCase ( lowerCAmelCase_ :Any , lowerCAmelCase_ :Optional[Any] , lowerCAmelCase_ :Optional[Any] )->Optional[Any]:
    '''simple docstring'''
    snake_case_ = dct.pop(lowerCAmelCase_ )
    snake_case_ = val


def _lowerCAmelCase ( lowerCAmelCase_ :Dict , lowerCAmelCase_ :int=False )->Dict:
    '''simple docstring'''
    if base_model:
        snake_case_ = ""
    else:
        snake_case_ = "mobilevitv2."

    snake_case_ = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            snake_case_ = k[8:]
        else:
            snake_case_ = k

        if ".block." in k:
            snake_case_ = k_new.replace(".block." , "." )
        if ".conv." in k:
            snake_case_ = k_new.replace(".conv." , ".convolution." )
        if ".norm." in k:
            snake_case_ = k_new.replace(".norm." , ".normalization." )

        if "conv_1." in k:
            snake_case_ = k_new.replace("conv_1." , F'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if F'''layer_{i}.''' in k:
                snake_case_ = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            snake_case_ = k_new.replace(".exp_1x1." , ".expand_1x1." )
        if ".red_1x1." in k:
            snake_case_ = k_new.replace(".red_1x1." , ".reduce_1x1." )

        for i in [3, 4, 5]:
            if F'''layer_{i}.0.''' in k:
                snake_case_ = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if F'''layer_{i}.1.local_rep.0.''' in k:
                snake_case_ = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if F'''layer_{i}.1.local_rep.1.''' in k:
                snake_case_ = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )

        for i in [3, 4, 5]:
            if i == 3:
                snake_case_ = [0, 1]
            elif i == 4:
                snake_case_ = [0, 1, 2, 3]
            elif i == 5:
                snake_case_ = [0, 1, 2]

            for j in j_in:
                if F'''layer_{i}.1.global_rep.{j}.''' in k:
                    snake_case_ = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    snake_case_ = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )

            if F'''layer_{i}.1.conv_proj.''' in k:
                snake_case_ = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )

        if "pre_norm_attn.0." in k:
            snake_case_ = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
        if "pre_norm_attn.1." in k:
            snake_case_ = k_new.replace("pre_norm_attn.1." , "attention." )
        if "pre_norm_ffn.0." in k:
            snake_case_ = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
        if "pre_norm_ffn.1." in k:
            snake_case_ = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
        if "pre_norm_ffn.3." in k:
            snake_case_ = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )

        if "classifier.1." in k:
            snake_case_ = k_new.replace("classifier.1." , "classifier." )

        if "seg_head." in k:
            snake_case_ = k_new.replace("seg_head." , "segmentation_head." )
        if ".aspp_layer." in k:
            snake_case_ = k_new.replace(".aspp_layer." , "." )
        if ".aspp_pool." in k:
            snake_case_ = k_new.replace(".aspp_pool." , "." )

        rename_keys.append((k, k_new) )
    return rename_keys


def _lowerCAmelCase ( lowerCAmelCase_ :Optional[Any] )->Optional[int]:
    '''simple docstring'''
    snake_case_ = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head." ):
            keys_to_ignore.append(lowerCAmelCase_ )
    for k in keys_to_ignore:
        state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )


def _lowerCAmelCase ( )->List[Any]:
    '''simple docstring'''
    snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    snake_case_ = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
    return im


@torch.no_grad()
def _lowerCAmelCase ( lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Dict , lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Dict )->Dict:
    '''simple docstring'''
    snake_case_ = get_mobilevitva_config(lowerCAmelCase_ , lowerCAmelCase_ )

    # load original state_dict
    snake_case_ = torch.load(lowerCAmelCase_ , map_location="cpu" )

    # load huggingface model
    if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
        snake_case_ = MobileViTVaForSemanticSegmentation(lowerCAmelCase_ ).eval()
        snake_case_ = False
    else:
        snake_case_ = MobileViTVaForImageClassification(lowerCAmelCase_ ).eval()
        snake_case_ = False

    # remove and rename some keys of load the original model
    snake_case_ = checkpoint
    remove_unused_keys(lowerCAmelCase_ )
    snake_case_ = create_rename_keys(lowerCAmelCase_ , base_model=lowerCAmelCase_ )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    # load modified state_dict
    model.load_state_dict(lowerCAmelCase_ )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    snake_case_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    snake_case_ = image_processor(images=prepare_img() , return_tensors="pt" )
    snake_case_ = model(**lowerCAmelCase_ )

    # verify classification model
    if task_name.startswith("imagenet" ):
        snake_case_ = outputs.logits
        snake_case_ = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            snake_case_ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
            assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )

    Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
    print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(lowerCAmelCase_ )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(lowerCAmelCase_ )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
                - MobileViTV2 (256x256) : imagenet1k_256
                - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256
                - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
                - ADE20K Dataset : ade20k_deeplabv3
                - Pascal VOC 2012 Dataset: voc_deeplabv3
        '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )

    SCREAMING_SNAKE_CASE :int = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
159
1
"""simple docstring""" import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = checkpoint UpperCAmelCase_ : Optional[int] = {} UpperCAmelCase_ : int = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ : Optional[int] = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ : Any = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ : Dict = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ : str = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ : Optional[Any] = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ : Dict = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ : Optional[Any] = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ : Tuple = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ : Tuple = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ : Union[str, Any] = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ : str = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ : List[str] = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ : int = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ : Dict = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ : List[Any] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ : Optional[int] = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ : str = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowerCamelCase ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ : int = len({".".join(layer.split("." 
)[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ : Dict = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowerCamelCase ) } for i in range(__lowerCamelCase ): UpperCAmelCase_ : str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ : Dict = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""" ) UpperCAmelCase_ : List[Any] = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""" ) UpperCAmelCase_ : str = renew_vae_resnet_paths(__lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase ) UpperCAmelCase_ : List[Any] = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ : List[Any] = 2 for i in range(1, num_mid_res_blocks + 1 ): UpperCAmelCase_ : Optional[int] = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] UpperCAmelCase_ : List[Any] = renew_vae_resnet_paths(__lowerCamelCase ) UpperCAmelCase_ : Dict = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase ) UpperCAmelCase_ : Any = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCAmelCase_ : Any = renew_vae_attention_paths(__lowerCamelCase ) UpperCAmelCase_ : List[Any] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase ) conv_attn_to_linear(__lowerCamelCase ) for i in range(__lowerCamelCase ): UpperCAmelCase_ : str = num_up_blocks - 1 - i UpperCAmelCase_ : Optional[Any] = [ key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ : Any = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] UpperCAmelCase_ : Optional[Any] = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] UpperCAmelCase_ : List[str] = renew_vae_resnet_paths(__lowerCamelCase ) UpperCAmelCase_ : Tuple = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase ) UpperCAmelCase_ : Dict = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ : int = 2 for i in range(1, num_mid_res_blocks + 1 ): UpperCAmelCase_ : Optional[Any] = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] UpperCAmelCase_ : Any = renew_vae_resnet_paths(__lowerCamelCase ) UpperCAmelCase_ : Any = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase ) UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ : Optional[Any] = renew_vae_attention_paths(__lowerCamelCase ) UpperCAmelCase_ : str = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} 
assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase ) conv_attn_to_linear(__lowerCamelCase ) return new_checkpoint def __a ( __lowerCamelCase, __lowerCamelCase, ): # Only support V1 UpperCAmelCase_ : int = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ : Dict = io.BytesIO(r.content ) UpperCAmelCase_ : Tuple = OmegaConf.load(__lowerCamelCase ) UpperCAmelCase_ : int = 512 UpperCAmelCase_ : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ : Union[str, Any] = {} with safe_open(__lowerCamelCase, framework="pt", device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ : Union[str, Any] = f.get_tensor(__lowerCamelCase ) else: UpperCAmelCase_ : Union[str, Any] = torch.load(__lowerCamelCase, map_location=__lowerCamelCase )["state_dict"] # Convert the VAE model. UpperCAmelCase_ : Any = create_vae_diffusers_config(__lowerCamelCase, image_size=__lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = custom_convert_ldm_vae_checkpoint(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : str = AutoencoderKL(**__lowerCamelCase ) vae.load_state_dict(__lowerCamelCase ) vae.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') _a = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
23
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
23
1
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A ( __a, unittest.TestCase ): """simple docstring""" __lowerCAmelCase = CodeGenTokenizer __lowerCAmelCase = CodeGenTokenizerFast __lowerCAmelCase = True __lowerCAmelCase = {"add_prefix_space": True} __lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a =[ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] a =dict(zip(a_ , range(len(a_ ) ) ) ) a =["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] a ={"""unk_token""": """<unk>"""} a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(a_ ) ) def SCREAMING_SNAKE_CASE ( self , **__A ) -> Tuple: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a_ ) def SCREAMING_SNAKE_CASE ( self , **__A ) -> int: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a_ ) def SCREAMING_SNAKE_CASE ( self , __A ) -> Dict: a ="""lower newer""" a ="""lower newer""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self ) -> Any: a =CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a ="""lower newer""" a =["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] a =tokenizer.tokenize(a_ , add_prefix_space=a_ ) self.assertListEqual(a_ , a_ ) a =tokens + [tokenizer.unk_token] a =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return a =self.get_tokenizer() a =self.get_rust_tokenizer(add_prefix_space=a_ ) a ="""lower newer""" # Testing tokenization a =tokenizer.tokenize(a_ , add_prefix_space=a_ ) a =rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) # Testing conversion to ids without special tokens a =tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ ) a =rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) # Testing conversion to ids with special tokens a =self.get_rust_tokenizer(add_prefix_space=a_ ) a =tokenizer.encode(a_ , add_prefix_space=a_ ) a =rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) # Testing the unknown token a =tokens + [rust_tokenizer.unk_token] a =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ ) def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE ( self , __A=15 ) -> Optional[int]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with 
self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a =self.rust_tokenizer_class.from_pretrained(a_ , **a_ ) # Simple input a ="""This is a simple input""" a =["""This is a simple input 1""", """This is a simple input 2"""] a =("""This is a simple input""", """This is a pair""") a =[ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='''max_length''' ) # Simple input self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='''max_length''' ) # Simple input self.assertRaises( a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='''max_length''' , ) # Pair input self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='''max_length''' ) # Pair input self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='''max_length''' ) # Pair input self.assertRaises( a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='''max_length''' , ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: a =CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input a ="""This is a simple input""" a =["""This is a simple input looooooooong""", """This is a simple input"""] a =("""This is a simple input""", """This is a pair""") a =[ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] a =tokenizer.pad_token_id a =tokenizer(a_ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) a =tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='''np''' ) a =tokenizer(*a_ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) a =tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def SCREAMING_SNAKE_CASE ( self ) -> int: a ="""$$$""" a =CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ ) a ="""This is a simple input""" a =["""This is a simple input 1""", """This is a simple input 2"""] a =tokenizer.bos_token_id a =tokenizer(a_ ) a =tokenizer(a_ ) self.assertEqual(out_s.input_ids[0] , a_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) a 
=tokenizer.decode(out_s.input_ids ) a =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , a_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a =CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) a ="""\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" a ="""\nif len_a > len_b: result = a\nelse: result = b""" a =tokenizer.encode(a_ ) a =["""^#""", re.escape('''<|endoftext|>''' ), """^'''""", """^\"\"\"""", """\n\n\n"""] a =tokenizer.decode(a_ , truncate_before_pattern=a_ ) self.assertEqual(a_ , a_ ) def SCREAMING_SNAKE_CASE ( self ) -> str: pass
81
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: List[str] ): '''simple docstring''' _snake_case : int = data _snake_case : Dict = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0] @staticmethod def UpperCamelCase_ ( a_: Optional[Any], a_: Dict ): '''simple docstring''' return ((n << b) | (n >> (32 - b))) & 0Xffffffff def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) _snake_case : Optional[int] = self.data + padding + struct.pack(""">Q""", 8 * len(self.data ) ) return padded_data def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' return [ self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 ) ] def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = list(struct.unpack(""">16L""", a_ ) ) + [0] * 64 for i in range(16, 80 ): _snake_case : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 ) return w def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = self.padding() _snake_case : str = self.split_blocks() for block in self.blocks: _snake_case : Any = self.expand_block(a_ ) _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.h for i in range(0, 80 ): if 0 <= i < 20: _snake_case : int = (b & c) | ((~b) & d) _snake_case : str = 0X5a827999 elif 20 <= i < 40: _snake_case : Optional[int] = b ^ c ^ d _snake_case : str = 0X6ed9eba1 elif 40 <= i < 60: _snake_case : List[Any] = (b & c) | (b & d) | (c & d) _snake_case : List[Any] = 0X8f1bbcdc elif 60 <= i < 80: _snake_case : List[Any] = b ^ c ^ d _snake_case : int = 0Xca62c1d6 _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = ( self.rotate(a_, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff, a, self.rotate(a_, 30 ), c, d, ) _snake_case : Union[str, Any] = ( self.h[0] + a & 0Xffffffff, self.h[1] + b & 0Xffffffff, self.h[2] + c & 0Xffffffff, self.h[3] + d & 0Xffffffff, self.h[4] + e & 0Xffffffff, ) return ("{:08x}" * 5).format(*self.h ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = B"""Test String""" assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324 def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) _snake_case : Union[str, Any] = parser.parse_args() _snake_case : List[Any] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: _snake_case : str = f.read() else: _snake_case : int = bytes(snake_case__ , """utf-8""" ) print(SHAaHash(snake_case__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
64
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case_ = logging.get_logger(__name__)

snake_case_ = {
    'facebook/s2t-wav2vec2-large-en-de': (
        'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    A_ : Tuple = 'speech_to_text_2'
    A_ : Tuple = ['past_key_values']
    A_ : Dict = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self : int , a__ : List[str]=1_0000 , a__ : List[Any]=6 , a__ : Optional[Any]=2048 , a__ : List[Any]=4 ,
                 a__ : Optional[Any]=0.0 , a__ : Dict=True , a__ : Optional[int]="relu" , a__ : Optional[int]=256 ,
                 a__ : Union[str, Any]=0.1 , a__ : int=0.0 , a__ : Optional[int]=0.0 , a__ : List[str]=0.0_2 ,
                 a__ : Union[str, Any]=2 , a__ : int=True , a__ : List[Any]=1 , a__ : str=0 , a__ : List[Any]=2 ,
                 a__ : Any=1024 , **a__ : Dict , ):
        """simple docstring"""
        __snake_case = vocab_size
        __snake_case = d_model
        __snake_case = decoder_ffn_dim
        __snake_case = decoder_layers
        __snake_case = decoder_attention_heads
        __snake_case = dropout
        __snake_case = attention_dropout
        __snake_case = activation_dropout
        __snake_case = activation_function
        __snake_case = init_std
        __snake_case = decoder_layerdrop
        __snake_case = use_cache
        __snake_case = decoder_layers
        __snake_case = scale_embedding  # scale factor will be sqrt(d_model) if True
        __snake_case = max_target_positions

        super().__init__(
            pad_token_id=a__ ,
            bos_token_id=a__ ,
            eos_token_id=a__ ,
            decoder_start_token_id=a__ ,
            **a__ ,
        )
352
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

snake_case_ = logging.get_logger(__name__)


@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    def __init__(self : int , *a__ : List[Any] , **a__ : Dict ):
        """simple docstring"""
        super().__init__(*a__ , **a__ )
        requires_backends(self , '''vision''' )
        self.check_model_type(a__ )

    def __call__(self : Optional[Any] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : List[str] ):
        """simple docstring"""
        return super().__call__(a__ , **a__ )

    def a (self : int , **a__ : int ):
        """simple docstring"""
        return {}, {}, {}

    def a (self : Optional[int] , a__ : Optional[int] ):
        """simple docstring"""
        __snake_case = load_image(a__ )
        __snake_case = image.size
        __snake_case = self.image_processor(images=a__ , return_tensors=self.framework )
        return model_inputs

    def a (self : List[Any] , a__ : Union[str, Any] ):
        """simple docstring"""
        __snake_case = self.model(**a__ )
        return model_outputs

    def a (self : int , a__ : str ):
        """simple docstring"""
        __snake_case = model_outputs.predicted_depth
        __snake_case = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a__ )
        __snake_case = prediction.squeeze().cpu().numpy()
        __snake_case = (output * 255 / np.max(a__ )).astype('''uint8''' )
        __snake_case = Image.fromarray(a__ )
        __snake_case = {}
        __snake_case = predicted_depth
        __snake_case = depth
        return output_dict
238
0
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


lowerCamelCase__ = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    def __init__( self : List[str] , *__lowercase : int , **__lowercase : List[Any] ):
        '''simple docstring'''
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""" ,
            __lowercase ,
        )
        super().__init__(*__lowercase , **__lowercase )
302
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    __lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
302
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) class UpperCAmelCase_ ( a_ ): __SCREAMING_SNAKE_CASE : Union[str, Any] = '''encoder-decoder''' __SCREAMING_SNAKE_CASE : str = True def __init__( self : int , **A : str ): super().__init__(**lowercase_ ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _UpperCAmelCase : Optional[Any] = kwargs.pop("encoder" ) _UpperCAmelCase : Tuple = encoder_config.pop("model_type" ) _UpperCAmelCase : Tuple = kwargs.pop("decoder" ) _UpperCAmelCase : Optional[Any] = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _UpperCAmelCase : Dict = AutoConfig.for_model(lowercase_ , **lowercase_ ) _UpperCAmelCase : Union[str, Any] = AutoConfig.for_model(lowercase_ , **lowercase_ ) _UpperCAmelCase : Any = True @classmethod def snake_case_ ( cls : int , A : Dict , A : Tuple , **A : int ): logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" ) _UpperCAmelCase : Dict = True _UpperCAmelCase : str = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase_ ) def snake_case_ ( self : Dict ): _UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__ ) _UpperCAmelCase : List[str] = self.encoder.to_dict() _UpperCAmelCase : Tuple = self.decoder.to_dict() _UpperCAmelCase : int = self.__class__.model_type return output
364
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json", "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json", "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json", } class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : int = 'owlvit_text_model' def __init__( self : int , A : int=4_9_4_0_8 , A : Optional[Any]=5_1_2 , A : Optional[Any]=2_0_4_8 , A : str=1_2 , A : int=8 , A : Tuple=1_6 , A : List[Any]="quick_gelu" , A : Tuple=1e-5 , A : Union[str, Any]=0.0 , A : List[Any]=0.02 , A : str=1.0 , A : str=0 , A : List[str]=4_9_4_0_6 , A : str=4_9_4_0_7 , **A : Optional[Any] , ): super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A ) _UpperCAmelCase : Union[str, Any] = vocab_size _UpperCAmelCase : str = hidden_size _UpperCAmelCase : List[Any] = intermediate_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : str = num_attention_heads _UpperCAmelCase : List[str] = max_position_embeddings _UpperCAmelCase : List[Any] = hidden_act _UpperCAmelCase : Tuple = layer_norm_eps _UpperCAmelCase : List[str] = attention_dropout _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : List[Any] = initializer_factor @classmethod def snake_case_ ( cls : Any , A : Union[str, os.PathLike] , **A : Dict ): cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(A , **A ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": _UpperCAmelCase : int = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A , **A ) class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : Tuple = 'owlvit_vision_model' def __init__( self : Union[str, Any] , A : Optional[int]=7_6_8 , A : int=3_0_7_2 , A : List[str]=1_2 , A : List[str]=1_2 , A : Optional[int]=3 , A : Optional[int]=7_6_8 , A : str=3_2 , A : Tuple="quick_gelu" , A : Dict=1e-5 , A : Optional[int]=0.0 , A : List[Any]=0.02 , A : str=1.0 , **A : Tuple , ): super().__init__(**A ) _UpperCAmelCase : List[str] = hidden_size _UpperCAmelCase : Tuple = intermediate_size _UpperCAmelCase : Optional[Any] = num_hidden_layers _UpperCAmelCase : Dict = num_attention_heads _UpperCAmelCase : Optional[Any] = num_channels _UpperCAmelCase : Union[str, Any] = image_size _UpperCAmelCase : Dict = patch_size _UpperCAmelCase : List[str] = hidden_act _UpperCAmelCase : Union[str, Any] = layer_norm_eps _UpperCAmelCase : Any = attention_dropout _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Tuple = initializer_factor @classmethod def snake_case_ ( cls : Optional[int] , A : Union[str, os.PathLike] , **A : int ): cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : Dict = cls.get_config_dict(A , **A ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": _UpperCAmelCase : Tuple = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A , **A ) class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : List[str] = 'owlvit' __SCREAMING_SNAKE_CASE : Optional[Any] = True def __init__( self : Optional[Any] , A : Dict=None , A : Tuple=None , A : Optional[Any]=5_1_2 , A : Optional[Any]=2.6_592 , A : int=True , **A : Tuple , ): super().__init__(**A ) if text_config is None: _UpperCAmelCase : List[Any] = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: _UpperCAmelCase : Tuple = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." ) _UpperCAmelCase : str = OwlViTTextConfig(**A ) _UpperCAmelCase : int = OwlViTVisionConfig(**A ) _UpperCAmelCase : Optional[Any] = projection_dim _UpperCAmelCase : str = logit_scale_init_value _UpperCAmelCase : Optional[Any] = return_dict _UpperCAmelCase : str = 1.0 @classmethod def snake_case_ ( cls : Dict , A : Union[str, os.PathLike] , **A : Any ): cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : str = cls.get_config_dict(A , **A ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A , **A ) @classmethod def snake_case_ ( cls : Optional[int] , A : Dict , A : Dict , **A : Optional[Any] ): _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : int = text_config _UpperCAmelCase : Dict = vision_config return cls.from_dict(A , **A ) def snake_case_ ( self : Optional[int] ): _UpperCAmelCase : str = copy.deepcopy(self.__dict__ ) _UpperCAmelCase : Optional[int] = self.text_config.to_dict() _UpperCAmelCase : Optional[int] = self.vision_config.to_dict() _UpperCAmelCase : List[Any] = self.__class__.model_type return output class UpperCAmelCase_ ( _UpperCamelCase ): @property def snake_case_ ( self : List[str] ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def snake_case_ ( self : Optional[int] ): return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def snake_case_ ( self : str ): return 1e-4 def snake_case_ ( self : str , A : "ProcessorMixin" , A : int = -1 , A : int = -1 , A : Optional["TensorType"] = None , ): _UpperCAmelCase : Optional[Any] = super().generate_dummy_inputs( processor.tokenizer , batch_size=A , seq_length=A , framework=A ) _UpperCAmelCase : Union[str, Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=A , framework=A ) return {**text_input_dict, **image_input_dict} @property def snake_case_ ( self : List[Any] ): return 1_4
202
0
"""simple docstring""" import math def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [True] * n __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): __SCREAMING_SNAKE_CASE = i * 2 while index < n: __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = index + i __SCREAMING_SNAKE_CASE = [2] for i in range(3 , snake_case__ , 2 ): if is_prime[i]: primes.append(snake_case__ ) return primes def UpperCAmelCase__ (lowerCAmelCase_ = 9999_6666_3333 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = math.floor(math.sqrt(snake_case__ ) ) + 100 __SCREAMING_SNAKE_CASE = prime_sieve(snake_case__ ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = primes[prime_index] while (last_prime**2) <= limit: __SCREAMING_SNAKE_CASE = primes[prime_index + 1] __SCREAMING_SNAKE_CASE = last_prime**2 __SCREAMING_SNAKE_CASE = next_prime**2 # Get numbers divisible by lps(current) __SCREAMING_SNAKE_CASE = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __SCREAMING_SNAKE_CASE = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __SCREAMING_SNAKE_CASE = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __SCREAMING_SNAKE_CASE = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
54
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() A_ = logging.get_logger(__name__) A_ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } A_ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : str ): """simple docstring""" for attribute in key.split(""".""" ): _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ) if weight_type is not None: _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape else: _snake_case : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": _snake_case : int = value elif weight_type == "weight_g": _snake_case : str = value elif weight_type == "weight_v": _snake_case : Tuple = value elif weight_type == "bias": _snake_case : List[str] = value else: _snake_case : int = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ): """simple docstring""" _snake_case : List[Any] = [] _snake_case : Optional[Any] = fairseq_model.state_dict() _snake_case : str = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _snake_case : Optional[Any] = None for name, value in fairseq_dict.items(): _snake_case : Optional[Any] = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , ) _snake_case : Dict = True elif name.split(""".""" )[0] == "proj": _snake_case : Dict = fairseq_model.proj _snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: _snake_case : Dict = True if "*" in mapped_key: _snake_case : Optional[int] = name.split(snake_case__ )[0].split(""".""" )[-2] _snake_case : Union[str, Any] = mapped_key.replace("""*""" , snake_case__ ) if "weight_g" in name: _snake_case : str = """weight_g""" elif "weight_v" in name: _snake_case : Optional[Any] = """weight_v""" elif "bias" in name: _snake_case : Union[str, Any] = """bias""" elif "weight" in name: _snake_case : int = """weight""" else: _snake_case : Optional[int] = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(F"Unused weights: {unused_weights}" ) return proj_weight def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int ): """simple docstring""" _snake_case : Any = full_name.split("""conv_layers.""" )[-1] _snake_case : Optional[int] = name.split(""".""" ) _snake_case : List[str] = int(items[0] ) _snake_case : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) _snake_case : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) _snake_case : List[Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) _snake_case : int = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) _snake_case : List[str] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case , _snake_case : Optional[Any] = emb.weight.shape _snake_case : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) _snake_case : Union[str, Any] = emb.weight.data return lin_layer def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f: _snake_case : Any = f.readlines() _snake_case : Optional[Any] = [line.split(""" """ )[0] for line in lines] _snake_case : str = len(snake_case__ ) _snake_case : Tuple = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(snake_case__ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , ): """simple docstring""" _snake_case : Optional[int] = WavaVecaConfig.from_pretrained(snake_case__ ) _snake_case : List[str] = SpeechaTextaConfig.from_pretrained( snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ ) _snake_case : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) _snake_case , _snake_case , _snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) _snake_case : Optional[Any] = model[0].eval() # set weights for wav2vec2 encoder _snake_case : Any = WavaVecaModel(snake_case__ ) _snake_case : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , snake_case__ ) _snake_case : Optional[Any] = SpeechaTextaForCausalLM(snake_case__ ) _snake_case , _snake_case : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ ) # set output linear layer unexpected_keys.remove("""embed_out""" ) _snake_case : Any = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) _snake_case : Any = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ ) _snake_case : Any = False # add projection layer _snake_case : int = nn.Parameter(projection_layer.weight ) _snake_case : Any = nn.Parameter(projection_layer.bias ) _snake_case : Any = create_vocab_dict(snake_case__ ) with open(os.path.join(snake_case__ , """vocab.json""" ) , """w""" ) as fp: json.dump(snake_case__ , snake_case__ ) _snake_case : Dict = SpeechaTextaTokenizer(os.path.join(snake_case__ , """vocab.json""" ) ) tokenizer.save_pretrained(snake_case__ ) _snake_case : str = hf_wavavec.config.to_dict() _snake_case : List[str] = tokenizer.pad_token_id _snake_case : Union[str, Any] = tokenizer.bos_token_id _snake_case : Union[str, Any] = tokenizer.eos_token_id _snake_case : Optional[Any] = """speech_to_text_2""" _snake_case : Optional[int] = """wav2vec2""" _snake_case : Tuple = SpeechEncoderDecoderConfig.from_dict(snake_case__ ) hf_wavavec.save_pretrained(snake_case__ ) feature_extractor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ 
= argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') A_ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
64
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


A__ : List[Any] = {
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : List[Any] = ['''Speech2TextTokenizer''']

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : List[Any] = ['''Speech2TextFeatureExtractor''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : Optional[Any] = [
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : Dict = [
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    A__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
'''simple docstring''' import math def a_ ( _UpperCAmelCase : int ) -> list: __snake_case : Optional[Any] = [True] * n __snake_case : Optional[int] = False __snake_case : Dict = False __snake_case : List[Any] = True for i in range(3 ,int(n**0.5 + 1 ) ,2 ): __snake_case : Optional[int] = i * 2 while index < n: __snake_case : Union[str, Any] = False __snake_case : int = index + i __snake_case : Dict = [2] for i in range(3 ,_UpperCAmelCase ,2 ): if is_prime[i]: primes.append(_UpperCAmelCase ) return primes def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int: __snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00 __snake_case : Tuple = prime_sieve(_UpperCAmelCase ) __snake_case : List[Any] = 0 __snake_case : List[Any] = 0 __snake_case : Optional[int] = primes[prime_index] while (last_prime**2) <= limit: __snake_case : Optional[int] = primes[prime_index + 1] __snake_case : Union[str, Any] = last_prime**2 __snake_case : Dict = next_prime**2 # Get numbers divisible by lps(current) __snake_case : Optional[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __snake_case : Optional[Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __snake_case : List[str] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __snake_case : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
0
1
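The style_context in the preceding record is an obfuscated odd-only Sieve of Eratosthenes feeding a Project Euler-style search over consecutive prime squares. A clean restatement of the sieve as the textbook full version, which returns the same primes below n (the names here are mine, not the record's):

import math

def prime_sieve(n: int) -> list:
    """Return all primes strictly below n."""
    if n <= 2:
        return []
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, math.isqrt(n) + 1):
        if is_prime[i]:
            # Multiples below i * i were already crossed off by smaller primes.
            for multiple in range(i * i, n, i):
                is_prime[multiple] = False
    return [i for i in range(2, n) if is_prime[i]]

print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]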
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __A =logging.get_logger(__name__) __A =[ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def lowerCamelCase_ ( lowerCamelCase__ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: lowerCamelCase_ = k.replace(lowerCamelCase__ , lowerCamelCase__ ) if k.startswith("encoder" ): lowerCamelCase_ = k.replace(".attn" , ".self_attn" ) lowerCamelCase_ = k.replace("norm1" , "self_attn_layer_norm" ) lowerCamelCase_ = k.replace("norm2" , "final_layer_norm" ) elif k.startswith("decoder" ): lowerCamelCase_ = k.replace("norm1" , "self_attn_layer_norm" ) lowerCamelCase_ = k.replace("norm2" , "encoder_attn_layer_norm" ) lowerCamelCase_ = k.replace("norm3" , "final_layer_norm" ) return k def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = [ "model.encoder.layernorm_embedding.weight", "model.encoder.layernorm_embedding.bias", "model.decoder.layernorm_embedding.weight", "model.decoder.layernorm_embedding.bias", ] for k in keys: lowerCamelCase_ = sd.pop(lowerCamelCase__ ) lowerCamelCase_ = k.replace("layernorm_embedding" , "layer_norm" ) assert new_k not in sd lowerCamelCase_ = v __A =['''START'''] @torch.no_grad() def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" ) lowerCamelCase_ = model["model"] lowerCamelCase_ = BlenderbotConfig.from_json_file(lowerCamelCase__ ) lowerCamelCase_ = BlenderbotForConditionalGeneration(lowerCamelCase__ ) lowerCamelCase_ = m.model.state_dict().keys() lowerCamelCase_ = [] lowerCamelCase_ = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue lowerCamelCase_ = rename_state_dict_key(lowerCamelCase__ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: lowerCamelCase_ = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowerCamelCase__ ) m.model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ ) m.half() m.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": __A =argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) __A =parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
19
import argparse import math import traceback import dateutil.parser as date_parser import requests def UpperCAmelCase ( a_ ) -> str: """simple docstring""" __A = {} __A = job["started_at"] __A = job["completed_at"] __A = date_parser.parse(a_ ) __A = date_parser.parse(a_ ) __A = round((end_datetime - start_datetime).total_seconds() / 60.0 ) __A = start __A = end __A = duration_in_min return job_info def UpperCAmelCase ( a_ , a_=None ) -> str: """simple docstring""" __A = None if token is not None: __A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} __A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' __A = requests.get(a_ , headers=a_ ).json() __A = {} try: job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} ) __A = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 ) for i in range(a_ ): __A = requests.get(url + F'''&page={i + 2}''' , headers=a_ ).json() job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args() SCREAMING_SNAKE_CASE :Union[str, Any] = get_job_time(args.workflow_run_id) SCREAMING_SNAKE_CASE :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f'''{k}: {v["duration"]}''')
15
0
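The converter in the record above maps ParlAI parameter names onto HF names by running each state-dict key through an ordered list of substring substitutions. The core trick in isolation (the pattern list is truncated here for illustration):

PATTERNS = [
    ("attention", "attn"),
    ("q_lin", "q_proj"),
    ("out_lin", "out_proj"),
]

def rename_key(key: str) -> str:
    # Order matters: each (old, new) pair is applied in sequence.
    for old, new in PATTERNS:
        key = key.replace(old, new)
    return key

state_dict = {"encoder.layers.0.attention.q_lin.weight": "w"}
print({rename_key(k): v for k, v in state_dict.items()})
# {'encoder.layers.0.attn.q_proj.weight': 'w'}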
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCAmelCase :Optional[int] = 3 def lowerCamelCase ( lowerCAmelCase : int ): """simple docstring""" print('Generating primitive root of p' ) while True: __magic_name__ : List[Any] = random.randrange(3 , lowerCAmelCase ) if pow(lowerCAmelCase , 2 , lowerCAmelCase ) == 1: continue if pow(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) == 1: continue return g def lowerCamelCase ( lowerCAmelCase : int ): """simple docstring""" print('Generating prime p...' ) __magic_name__ : str = rabin_miller.generate_large_prime(lowerCAmelCase ) # select large prime number. __magic_name__ : Dict = primitive_root(lowerCAmelCase ) # one primitive root on modulo p. __magic_name__ : Tuple = random.randrange(3 , lowerCAmelCase ) # private_key -> have to be greater than 2 for safety. __magic_name__ : Optional[int] = cryptomath.find_mod_inverse(pow(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ) __magic_name__ : Dict = (key_size, e_a, e_a, p) __magic_name__ : str = (key_size, d) return public_key, private_key def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : int ): """simple docstring""" if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ): print('\nWARNING:' ) print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' 'Use a different name or delete these files and re-run this program.' ) sys.exit() __magic_name__ , __magic_name__ : Any = generate_key(lowerCAmelCase ) print(f'\nWriting public key to file {name}_pubkey.txt...' ) with open(f'{name}_pubkey.txt' , 'w' ) as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' ) print(f'Writing private key to file {name}_privkey.txt...' ) with open(f'{name}_privkey.txt' , 'w' ) as fo: fo.write(f'{private_key[0]},{private_key[1]}' ) def lowerCamelCase ( ): """simple docstring""" print('Making key files...' ) make_key_files('elgamal' , 2048 ) print('Key files generation successful' ) if __name__ == "__main__": main()
275
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor lowerCAmelCase :Tuple = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Any , *_A : List[Any] , **_A : str ) -> None: warnings.warn( 'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use BeitImageProcessor instead.' , _A , ) super().__init__(*_A , **_A )
275
1
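generate_key in the record above depends on a modular inverse (cryptomath.find_mod_inverse, typically the extended Euclidean algorithm). Since Python 3.8 the built-in pow computes the same value directly; a self-contained check, with an arbitrary Mersenne prime as the modulus:

p = 2_147_483_647   # 2**31 - 1, prime; chosen only for illustration
a = 123_456_789
a_inv = pow(a, -1, p)   # modular inverse of a modulo p
assert (a * a_inv) % p == 1
print(a_inv)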
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput A = 8 def __A ( a_ :str , a_ :Any=BITS) -> Optional[Any]: __a : List[Any] = x.device __a : List[Any] = (x * 2_55).int().clamp(0 , 2_55) __a : List[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__lowerCamelCase) __a : Tuple = rearrange(__lowerCamelCase , '''d -> d 1 1''') __a : Optional[int] = rearrange(__lowerCamelCase , '''b c h w -> b c 1 h w''') __a : Tuple = ((x & mask) != 0).float() __a : Dict = rearrange(__lowerCamelCase , '''b c d h w -> b (c d) h w''') __a : Optional[Any] = bits * 2 - 1 return bits def __A ( a_ :Dict , a_ :str=BITS) -> List[Any]: __a : str = x.device __a : Tuple = (x > 0).int() __a : Optional[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__lowerCamelCase , dtype=torch.intaa) __a : List[Any] = rearrange(__lowerCamelCase , '''d -> d 1 1''') __a : Tuple = rearrange(__lowerCamelCase , '''b (c d) h w -> b c d h w''' , d=8) __a : Union[str, Any] = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''') return (dec / 2_55).clamp(0.0 , 1.0) def __A ( self :str , a_ :Union[str, Any] , a_ :Optional[int] , a_ :Dict , a_ :Any = 0.0 , a_ :Optional[Any] = True , a_ :Any=None , a_ :int = True , ) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''') # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) __a : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas __a : Tuple = self.alphas_cumprod[timestep] __a : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod __a : str = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __a : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" __a : Dict = self.bit_scale if self.config.clip_sample: __a : Union[str, Any] = torch.clamp(__lowerCamelCase , -scale , __lowerCamelCase) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) __a : Optional[Any] = self._get_variance(__lowerCamelCase , __lowerCamelCase) __a : Optional[int] = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide __a : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __a : Dict = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __a : Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 __a : Union[str, Any] = model_output.device if torch.is_tensor(__lowerCamelCase) else '''cpu''' __a : Union[str, Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__lowerCamelCase).to(__lowerCamelCase) __a : Dict = self._get_variance(__lowerCamelCase , __lowerCamelCase) ** 0.5 * eta * noise __a : List[Any] = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase) def __A ( self :int , a_ :str , a_ :Union[str, Any] , a_ :Any , a_ :Dict="epsilon" , a_ :Union[str, Any]=None , a_ :str = True , ) -> Union[DDPMSchedulerOutput, Tuple]: __a : List[str] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: __a : Optional[Any] = torch.split(__lowerCamelCase , sample.shape[1] , dim=1) else: __a : Optional[int] = None # 1. compute alphas, betas __a : Tuple = self.alphas_cumprod[t] __a : Tuple = self.alphas_cumprod[t - 1] if t > 0 else self.one __a : Optional[Any] = 1 - alpha_prod_t __a : Union[str, Any] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": __a : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": __a : Optional[int] = model_output else: raise ValueError(F"""Unsupported prediction_type {prediction_type}.""") # 3. Clip "predicted x_0" __a : Tuple = self.bit_scale if self.config.clip_sample: __a : Union[str, Any] = torch.clamp(__lowerCamelCase , -scale , __lowerCamelCase) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __a : Any = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t __a : int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __a : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise __a : Any = 0 if t > 0: __a : List[Any] = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__lowerCamelCase).to(model_output.device) __a : Dict = (self._get_variance(__lowerCamelCase , predicted_variance=__lowerCamelCase) ** 0.5) * noise __a : str = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase) class __lowercase ( A_ ): '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1.0 , ): super().__init__() __a : Optional[Any] = bit_scale __a : Dict = ( ddim_bit_scheduler_step if isinstance(_snake_case , _snake_case ) else ddpm_bit_scheduler_step ) self.register_modules(unet=_snake_case , scheduler=_snake_case ) @torch.no_grad() def __call__( self , _UpperCAmelCase = 256 , _UpperCAmelCase = 256 , _UpperCAmelCase = 50 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , **_UpperCAmelCase , ): __a : Dict = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=_snake_case , ) __a : Dict = decimal_to_bits(_snake_case ) * self.bit_scale __a : Optional[Any] = latents.to(self.device ) self.scheduler.set_timesteps(_snake_case ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual __a : List[Any] = self.unet(_snake_case , _snake_case ).sample # compute the previous noisy sample x_t -> x_t-1 __a : Any = self.scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample __a : str = bits_to_decimal(_snake_case ) if output_type == "pil": __a : str = self.numpy_to_pil(_snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=_snake_case )
160
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str: """simple docstring""" lowercase__ : int = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Optional[Any] = embeddings_size lowercase__ : Optional[Any] = hidden_sizes lowercase__ : str = depths lowercase__ : Tuple = is_training lowercase__ : List[Any] = use_labels lowercase__ : Union[str, Any] = hidden_act lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Optional[Any] = len(_snake_case ) def UpperCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Tuple = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels ) lowercase__ : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return ResNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = TFResNetModel(config=_snake_case ) lowercase__ : List[str] = model(_snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple: """simple docstring""" lowercase__ : Tuple = self.num_labels lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case ) lowercase__ : List[str] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCAmelCase ( self : Tuple ) -> str: """simple docstring""" lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : Dict = {'''pixel_values''': 
pixel_values} return config, inputs_dict @require_tf class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowerCAmelCase : Any = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase : List[Any] = False lowerCAmelCase : List[Any] = False lowerCAmelCase : int = False lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : List[str] = False def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ : Optional[Any] = TFResNetModelTester(self ) lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ) def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def UpperCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(_snake_case ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[int] = [*signature.parameters.keys()] lowercase__ : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ): lowercase__ : str = model_class(_snake_case ) lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Tuple = self.model_tester.num_stages self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ : List[Any] = layer_type lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states 
also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def __UpperCAmelCase ( ) -> Dict: lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : str ) -> Any: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ : Any = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' ) # forward pass lowercase__ : Dict = model(**_snake_case ) # verify the logits lowercase__ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
16
0
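decimal_to_bits / bits_to_decimal in the record above move images between uint8 values and per-bit channels scaled into {-1, +1}, written with einops over BCHW tensors. A dependency-light numpy sketch of the same round trip on plain integer arrays:

import numpy as np

def to_bits(x: np.ndarray, bits: int = 8) -> np.ndarray:
    """Integers in [0, 255] -> floats of shape x.shape + (bits,), each in {-1, +1}."""
    masks = 2 ** np.arange(bits - 1, -1, -1)            # [128, 64, ..., 1]
    b = ((x[..., None] & masks) != 0).astype(np.float32)
    return b * 2 - 1

def from_bits(b: np.ndarray, bits: int = 8) -> np.ndarray:
    masks = 2 ** np.arange(bits - 1, -1, -1)
    return ((b > 0).astype(np.int64) * masks).sum(axis=-1)

x = np.array([0, 37, 255])
assert (from_bits(to_bits(x)) == x).all()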
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging lowerCAmelCase_ = logging.get_logger(__name__) def __UpperCAmelCase ( ) -> Dict: # Get the sagemaker specific mp parameters from smp_options variable. lowercase__ : List[Any] = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. lowercase__ : Tuple = json.loads(__lowerCamelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. lowercase__ : Optional[int] = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". lowercase__ : Union[str, Any] = json.loads(__lowerCamelCase ) if not mpi_options.get('''sagemaker_mpi_enabled''' , __lowerCamelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = field( default="" ,metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} ,) def UpperCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' ,_snake_case ,) @cached_property def UpperCAmelCase ( self : Tuple ) -> "torch.device": """simple docstring""" logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: lowercase__ : str = torch.device('''cpu''' ) lowercase__ : Union[str, Any] = 0 elif is_sagemaker_model_parallel_available(): lowercase__ : List[str] = smp.local_rank() lowercase__ : Tuple = torch.device('''cuda''' ,_snake_case ) lowercase__ : Optional[Any] = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' ,timeout=self.ddp_timeout_delta ) lowercase__ : Tuple = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) lowercase__ : Any = torch.device('''cuda''' ,self.local_rank ) lowercase__ : Dict = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. 
GPU#1 lowercase__ : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. lowercase__ : Tuple = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' ,timeout=self.ddp_timeout_delta ) lowercase__ : Tuple = torch.device('''cuda''' ,self.local_rank ) lowercase__ : List[Any] = 1 if device.type == "cuda": torch.cuda.set_device(_snake_case ) return device @property def UpperCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return not is_sagemaker_model_parallel_available() @property def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return False
357
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig lowerCAmelCase_ = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring lowerCAmelCase_ = 'UperNetConfig' class __A ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Union[int, Tuple[int, int]] ,_snake_case : Union[int, Tuple[int, int], str] = 0 ,_snake_case : bool = False ,_snake_case : Union[int, Tuple[int, int]] = 1 ,) -> None: """simple docstring""" super().__init__() lowercase__ : Optional[int] = nn.Convad( in_channels=_snake_case ,out_channels=_snake_case ,kernel_size=_snake_case ,padding=_snake_case ,bias=_snake_case ,dilation=_snake_case ,) lowercase__ : Tuple = nn.BatchNormad(_snake_case ) lowercase__ : List[str] = nn.ReLU() def UpperCAmelCase ( self : str ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : Union[str, Any] = self.conv(_snake_case ) lowercase__ : List[str] = self.batch_norm(_snake_case ) lowercase__ : Tuple = self.activation(_snake_case ) return output class __A ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ,_snake_case : int ) -> None: """simple docstring""" super().__init__() lowercase__ : List[Any] = [ nn.AdaptiveAvgPoolad(_snake_case ), UperNetConvModule(_snake_case ,_snake_case ,kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(_snake_case ) ,_snake_case ) def UpperCAmelCase ( self : Dict ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : Any = input for layer in self.layers: lowercase__ : int = layer(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,_snake_case : Tuple[int, ...] ,_snake_case : int ,_snake_case : int ,_snake_case : bool ) -> None: """simple docstring""" super().__init__() lowercase__ : int = pool_scales lowercase__ : Dict = align_corners lowercase__ : Optional[Any] = in_channels lowercase__ : Optional[Any] = channels lowercase__ : int = [] for i, pool_scale in enumerate(_snake_case ): lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_snake_case ,in_channels=_snake_case ,channels=_snake_case ) self.blocks.append(_snake_case ) self.add_module(str(_snake_case ) ,_snake_case ) def UpperCAmelCase ( self : Any ,_snake_case : torch.Tensor ) -> List[torch.Tensor]: """simple docstring""" lowercase__ : int = [] for ppm in self.blocks: lowercase__ : Any = ppm(_snake_case ) lowercase__ : int = nn.functional.interpolate( _snake_case ,size=x.size()[2:] ,mode='''bilinear''' ,align_corners=self.align_corners ) ppm_outs.append(_snake_case ) return ppm_outs class __A ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ) -> str: """simple docstring""" super().__init__() lowercase__ : str = config lowercase__ : Optional[Any] = config.pool_scales # e.g. 
(1, 2, 3, 6) lowercase__ : Optional[Any] = in_channels lowercase__ : Any = config.hidden_size lowercase__ : Optional[Any] = False lowercase__ : Optional[int] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 ) # PSP Module lowercase__ : Dict = UperNetPyramidPoolingModule( self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,) lowercase__ : str = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,) # FPN Module lowercase__ : Any = nn.ModuleList() lowercase__ : Union[str, Any] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer lowercase__ : List[Any] = UperNetConvModule(_snake_case ,self.channels ,kernel_size=1 ) lowercase__ : Optional[int] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 ) self.lateral_convs.append(_snake_case ) self.fpn_convs.append(_snake_case ) lowercase__ : int = UperNetConvModule( len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.apply(self._init_weights ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> List[str]: """simple docstring""" if isinstance(_snake_case ,nn.Convad ): module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> str: """simple docstring""" lowercase__ : Dict = inputs[-1] lowercase__ : Optional[int] = [x] psp_outs.extend(self.psp_modules(_snake_case ) ) lowercase__ : Optional[Any] = torch.cat(_snake_case ,dim=1 ) lowercase__ : List[str] = self.bottleneck(_snake_case ) return output def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(_snake_case ) ) # build top-down path lowercase__ : List[Any] = len(_snake_case ) for i in range(used_backbone_levels - 1 ,0 ,-1 ): lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:] lowercase__ : int = laterals[i - 1] + nn.functional.interpolate( laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners ) # build outputs lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 ,0 ,-1 ): lowercase__ : Any = nn.functional.interpolate( fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners ) lowercase__ : Any = torch.cat(_snake_case ,dim=1 ) lowercase__ : Any = self.fpn_bottleneck(_snake_case ) lowercase__ : str = self.classifier(_snake_case ) return output class __A ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None: """simple docstring""" super().__init__() lowercase__ : int = config lowercase__ : Dict = config.auxiliary_in_channels lowercase__ : Optional[int] = config.auxiliary_channels lowercase__ : List[Any] = config.auxiliary_num_convs lowercase__ : List[Any] = config.auxiliary_concat_input lowercase__ : str = in_index lowercase__ : Any = (kernel_size // 2) * dilation lowercase__ : Optional[Any] = [] convs.append( 
UperNetConvModule( self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) ) if self.num_convs == 0: lowercase__ : List[str] = nn.Identity() else: lowercase__ : Dict = nn.Sequential(*_snake_case ) if self.concat_input: lowercase__ : int = UperNetConvModule( self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 ) lowercase__ : List[str] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 ) def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" self.apply(self._init_weights ) def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict: """simple docstring""" if isinstance(_snake_case ,nn.Convad ): module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : str = encoder_hidden_states[self.in_index] lowercase__ : List[str] = self.convs(_snake_case ) if self.concat_input: lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) ) lowercase__ : Dict = self.classifier(_snake_case ) return output class __A ( A_ ): '''simple docstring''' lowerCAmelCase : Any = UperNetConfig lowerCAmelCase : str = "pixel_values" lowerCAmelCase : Dict = True def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]: """simple docstring""" if isinstance(_snake_case ,_snake_case ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def UpperCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]: """simple docstring""" if isinstance(_snake_case ,_snake_case ): lowercase__ : List[Any] = value lowerCAmelCase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. 
See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,A_ ,) class __A ( A_ ): '''simple docstring''' def __init__( self : Optional[Any] ,_snake_case : Tuple ) -> int: """simple docstring""" super().__init__(_snake_case ) lowercase__ : int = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) lowercase__ : Any = UperNetHead(_snake_case ,in_channels=self.backbone.channels ) lowercase__ : str = UperNetFCNHead(_snake_case ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ) def UpperCAmelCase ( self : Dict ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,) -> Union[tuple, SemanticSegmenterOutput]: """simple docstring""" lowercase__ : int = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ : Any = output_attentions if output_attentions is not None else self.config.output_attentions lowercase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs( _snake_case ,output_hidden_states=_snake_case ,output_attentions=_snake_case ) lowercase__ : Optional[int] = outputs.feature_maps lowercase__ : Tuple = self.decode_head(_snake_case ) lowercase__ : Optional[int] = nn.functional.interpolate(_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case ) lowercase__ : List[str] = None if self.auxiliary_head is not None: lowercase__ : str = self.auxiliary_head(_snake_case ) lowercase__ : Dict = nn.functional.interpolate( _snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case ) lowercase__ : Any = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss lowercase__ : Union[str, Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case ) lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: lowercase__ : Tuple = (logits,) + outputs[1:] else: lowercase__ : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
302
0
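UperNetConvModule in the record above is the stock Conv -> BatchNorm -> ReLU building block. A minimal PyTorch sketch of that block with arbitrary layer sizes; bias=False follows the usual conv-before-batchnorm practice, since the record leaves the bias argument obfuscated:

import torch
from torch import nn

class ConvModule(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, padding: int = 0):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=False)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.activation(self.batch_norm(self.conv(x)))

print(ConvModule(3, 16, kernel_size=3, padding=1)(torch.randn(2, 3, 32, 32)).shape)
# torch.Size([2, 16, 32, 32])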
from __future__ import annotations class __SCREAMING_SNAKE_CASE : def __init__( self : Any , A : Union[str, Any]=None ) ->List[str]: lowerCamelCase__ : Any = data lowerCamelCase__ : Optional[Any] = None def __repr__( self : str ) ->List[str]: lowerCamelCase__ : int = [] lowerCamelCase__ : Any = self while temp: string_rep.append(F"{temp.data}" ) lowerCamelCase__ : int = temp.next return "->".join(A ) def _a ( UpperCAmelCase ) -> Any: """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) lowerCamelCase__ : List[Any] = Node(elements_list[0] ) for i in range(1 , len(UpperCAmelCase ) ): lowerCamelCase__ : int = Node(elements_list[i] ) lowerCamelCase__ : Optional[Any] = current.next return head def _a ( UpperCAmelCase ) -> None: """simple docstring""" if head_node is not None and isinstance(UpperCAmelCase , UpperCAmelCase ): print_reverse(head_node.next ) print(head_node.data ) def _a ( ) -> Optional[int]: """simple docstring""" from doctest import testmod testmod() lowerCamelCase__ : List[Any] = make_linked_list([14, 52, 14, 12, 43] ) print('''Linked List:''' ) print(UpperCAmelCase ) print('''Elements in Reverse:''' ) print_reverse(UpperCAmelCase ) if __name__ == "__main__": main()
142
import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor _A : str = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): def __init__( self : Dict , *A : Any , **A : List[Any] ) ->None: warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , A , ) super().__init__(*A , **A )
142
1
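The linked-list demo in the record above builds a singly linked list and prints it in reverse by recursing to the tail before printing. An un-obfuscated, runnable version with the head/current distinction spelled out (the names are mine):

class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

def make_linked_list(elements: list) -> Node:
    if not elements:
        raise Exception("The Elements List is empty")
    head = current = Node(elements[0])
    for value in elements[1:]:
        current.next = Node(value)
        current = current.next
    return head

def print_reverse(node) -> None:
    # Recurse first, print on the way back out, so the tail is printed first.
    if node is not None:
        print_reverse(node.next)
        print(node.data)

print_reverse(make_linked_list([14, 52, 14, 12, 43]))  # 43 12 14 52 14, one per line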
"""simple docstring""" import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
363
"""simple docstring""" import baseaa def a__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' return baseaa.baaencode(string.encode("utf-8" ) ) def a__ ( SCREAMING_SNAKE_CASE : bytes ): '''simple docstring''' return baseaa.baadecode(SCREAMING_SNAKE_CASE ).decode("utf-8" ) if __name__ == "__main__": lowerCAmelCase__ = '''Hello World!''' lowerCAmelCase__ = baseaa_encode(test) print(encoded) lowerCAmelCase__ = baseaa_decode(encoded) print(decoded)
133
0
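The baseaa module in the record above appears, under the dataset's digit-renaming scheme, to be Python's standard base64; the round trip it wraps looks like this and runs as-is:

import base64

def b64_encode(text: str) -> bytes:
    return base64.b64encode(text.encode("utf-8"))

def b64_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode("utf-8")

encoded = b64_encode("Hello World!")
print(encoded)              # b'SGVsbG8gV29ybGQh'
print(b64_decode(encoded))  # Hello World!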
"""simple docstring""" UpperCAmelCase : Any = { 0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7", 8: "8", 9: "9", 10: "a", 11: "b", 12: "c", 13: "d", 14: "e", 15: "f", } def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[Any]: '''simple docstring''' assert type(__lowerCamelCase ) in (int, float) and decimal == int(__lowerCamelCase ) lowercase_ = int(__lowerCamelCase ) lowercase_ = "" lowercase_ = False if decimal < 0: lowercase_ = True decimal *= -1 while decimal > 0: lowercase_ = divmod(__lowerCamelCase , 16 ) lowercase_ = values[remainder] + hexadecimal lowercase_ = "0x" + hexadecimal if negative: lowercase_ = "-" + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
136
"""simple docstring""" import argparse from collections import defaultdict def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = f"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__lowerCamelCase, "r" ) as f: UpperCAmelCase_ : List[Any] = f.readlines() UpperCAmelCase_ : int = f"""class {class_name}(""" UpperCAmelCase_ : Optional[Any] = f"""{4 * " "}def {test_name}(""" UpperCAmelCase_ : Optional[Any] = f"""{8 * " "}{correct_line.split()[0]}""" UpperCAmelCase_ : Tuple = f"""{16 * " "}{correct_line.split()[0]}""" UpperCAmelCase_ : int = False UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : str = False UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : int = [] for line in lines: if line.startswith(__lowerCamelCase ): UpperCAmelCase_ : Tuple = True elif in_class and line.startswith(__lowerCamelCase ): UpperCAmelCase_ : Optional[int] = True elif in_class and in_func and (line.startswith(__lowerCamelCase ) or line.startswith(__lowerCamelCase )): UpperCAmelCase_ : Any = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: UpperCAmelCase_ : Union[str, Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: UpperCAmelCase_ : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(f"""{spaces * " "}{correct_line}""" ) UpperCAmelCase_ : int = False else: new_lines.append(__lowerCamelCase ) with open(__lowerCamelCase, "w" ) as f: for line in new_lines: f.write(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase=None ): if fail is not None: with open(__lowerCamelCase, "r" ) as f: UpperCAmelCase_ : Tuple = {l.strip() for l in f.readlines()} else: UpperCAmelCase_ : str = None with open(__lowerCamelCase, "r" ) as f: UpperCAmelCase_ : Optional[int] = f.readlines() UpperCAmelCase_ : Any = defaultdict(__lowerCamelCase ) for line in correct_lines: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = line.split(";" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('--correct_filename', help='filename of tests with expected result') parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None) _a = parser.parse_args() main(args.correct_filename, args.fail_filename)
61
0
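The converter in the record above assembles a hexadecimal string digit-by-digit with divmod, then prefixes 0x and a sign. A compact restatement checked against the built-in hex(); note the record's loop never runs for 0, so the zero case is handled explicitly here:

DIGITS = "0123456789abcdef"

def decimal_to_hexadecimal(decimal: int) -> str:
    if decimal == 0:
        return "0x0"
    negative = decimal < 0
    decimal = abs(decimal)
    out = ""
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        out = DIGITS[remainder] + out
    return ("-" if negative else "") + "0x" + out

for n in (0, 5, 255, -256, 3735928559):
    assert decimal_to_hexadecimal(n) == hex(n), n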
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase_ = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['OwlViTFeatureExtractor'] lowerCAmelCase_ = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
362
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase_ = logging.get_logger(__name__) # General docstring lowerCAmelCase_ = 'RegNetConfig' # Base docstring lowerCAmelCase_ = 'facebook/regnet-y-040' lowerCAmelCase_ = [1, 1_088, 7, 7] # Image classification docstring lowerCAmelCase_ = 'facebook/regnet-y-040' lowerCAmelCase_ = 'tabby, tabby cat' lowerCAmelCase_ = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __A ( nn.Module ): '''simple docstring''' def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]: """simple docstring""" super().__init__() lowercase__ : Tuple = nn.Convad( _snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,) lowercase__ : List[Any] = nn.BatchNormad(_snake_case ) lowercase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ : Optional[Any] = self.convolution(_snake_case ) lowercase__ : Tuple = self.normalization(_snake_case ) lowercase__ : Tuple = self.activation(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]: """simple docstring""" super().__init__() lowercase__ : List[Any] = RegNetConvLayer( config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ) lowercase__ : str = config.num_channels def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str: """simple docstring""" lowercase__ : Union[str, Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) lowercase__ : Optional[int] = self.embedder(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any: """simple docstring""" super().__init__() lowercase__ : List[str] = nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case ) lowercase__ : Any = nn.BatchNormad(_snake_case ) def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor: """simple docstring""" lowercase__ : Union[str, Any] = self.convolution(_snake_case ) lowercase__ : Optional[int] = self.normalization(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict: """simple docstring""" super().__init__() lowercase__ : Any = nn.AdaptiveAvgPoolad((1, 1) ) lowercase__ : Dict = nn.Sequential( nn.Convad(_snake_case ,_snake_case 
,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,) def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ : List[str] = self.pooler(_snake_case ) lowercase__ : Union[str, Any] = self.attention(_snake_case ) lowercase__ : List[str] = hidden_state * attention return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]: """simple docstring""" super().__init__() lowercase__ : Tuple = in_channels != out_channels or stride != 1 lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width ) lowercase__ : str = ( RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity() ) lowercase__ : Optional[int] = nn.Sequential( RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,) lowercase__ : str = ACTaFN[config.hidden_act] def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]: """simple docstring""" lowercase__ : Tuple = hidden_state lowercase__ : Union[str, Any] = self.layer(_snake_case ) lowercase__ : List[Any] = self.shortcut(_snake_case ) hidden_state += residual lowercase__ : Optional[int] = self.activation(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]: """simple docstring""" super().__init__() lowercase__ : List[Any] = in_channels != out_channels or stride != 1 lowercase__ : List[str] = max(1 ,out_channels // config.groups_width ) lowercase__ : Tuple = ( RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity() ) lowercase__ : str = nn.Sequential( RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,) lowercase__ : Optional[Any] = ACTaFN[config.hidden_act] def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ : str = hidden_state lowercase__ : Optional[Any] = self.layer(_snake_case ) lowercase__ : int = self.shortcut(_snake_case ) hidden_state += residual lowercase__ : str = self.activation(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict: """simple docstring""" super().__init__() lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer lowercase__ : Optional[Any] = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,) def UpperCAmelCase ( self : Tuple ,_snake_case : 
int ) -> List[Any]: """simple docstring""" lowercase__ : List[str] = self.layers(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]: """simple docstring""" super().__init__() lowercase__ : str = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) ) lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ): self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) ) def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" lowercase__ : Dict = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase__ : int = hidden_states + (hidden_state,) lowercase__ : Any = stage_module(_snake_case ) if output_hidden_states: lowercase__ : Optional[int] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case ) class __A ( A_ ): '''simple docstring''' lowerCAmelCase : int = RegNetConfig lowerCAmelCase : List[Any] = "regnet" lowerCAmelCase : Optional[int] = "pixel_values" lowerCAmelCase : Union[str, Any] = True def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" if isinstance(_snake_case ,nn.Convad ): nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' ) elif isinstance(_snake_case ,(nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight ,1 ) nn.init.constant_(module.bias ,0 ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]: """simple docstring""" if isinstance(_snake_case ,_snake_case ): lowercase__ : str = value lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
,A_ ,) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __A ( A_ ): '''simple docstring''' def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple: """simple docstring""" super().__init__(_snake_case ) lowercase__ : Any = config lowercase__ : List[str] = RegNetEmbeddings(_snake_case ) lowercase__ : Any = RegNetEncoder(_snake_case ) lowercase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" lowercase__ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : Union[str, Any] = self.embedder(_snake_case ) lowercase__ : List[Any] = self.encoder( _snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ) lowercase__ : str = encoder_outputs[0] lowercase__ : Optional[int] = self.pooler(_snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " ,A_ ,) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __A ( A_ ): '''simple docstring''' def __init__( self : int ,_snake_case : Tuple ) -> Any: """simple docstring""" super().__init__(_snake_case ) lowercase__ : Optional[Any] = config.num_labels lowercase__ : int = RegNetModel(_snake_case ) # classification head lowercase__ : str = nn.Sequential( nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention: """simple docstring""" lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ) lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1] lowercase__ : Union[str, Any] = self.classifier(_snake_case ) lowercase__ : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase__ : List[Any] = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase__ : Dict = '''single_label_classification''' else: lowercase__ : Optional[int] = '''multi_label_classification''' if self.config.problem_type == "regression": lowercase__ : Union[str, Any] = MSELoss() if self.num_labels == 1: lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() ) else: lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case ) elif self.config.problem_type == "single_label_classification": lowercase__ : Tuple = CrossEntropyLoss() lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase__ : Any = BCEWithLogitsLoss() lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case ) if not return_dict: lowercase__ : Tuple = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
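# Illustrative sketch (our own names, not from the file above): the
# squeeze-and-excitation pattern that RegNetSELayer implements --
# global-average-pool to 1x1, a two-conv ReLU/Sigmoid gate, then channel-wise
# rescaling of the input. A minimal, runnable version for reference.
import torch
from torch import nn


class SqueezeExcite(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # (B, C, H, W) -> (B, C, 1, 1) gate, broadcast back over H and W
        attention = self.attention(self.pooler(hidden_state))
        return hidden_state * attention


if __name__ == "__main__":
    x = torch.randn(2, 64, 7, 7)
    assert SqueezeExcite(64, 16)(x).shape == x.shape  # shape-preserving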
302
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : @staticmethod def _UpperCamelCase ( *__UpperCamelCase : int , **__UpperCamelCase : str ) -> Optional[Any]: pass @is_pipeline_test @require_vision @require_torch class UpperCAmelCase_ ( unittest.TestCase): snake_case__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ) -> int: _UpperCamelCase = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) _UpperCamelCase = [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def _UpperCamelCase ( self : int , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> Tuple: _UpperCamelCase = object_detector(examples[0] , threshold=0.0 ) _UpperCamelCase = len(__UpperCamelCase ) self.assertGreater(__UpperCamelCase , 0 ) self.assertEqual( __UpperCamelCase , [ { '''score''': ANY(__UpperCamelCase ), '''label''': ANY(__UpperCamelCase ), '''box''': {'''xmin''': ANY(__UpperCamelCase ), '''ymin''': ANY(__UpperCamelCase ), '''xmax''': ANY(__UpperCamelCase ), '''ymax''': ANY(__UpperCamelCase )}, } for i in range(__UpperCamelCase ) ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: pass @require_torch def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: _UpperCamelCase = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) _UpperCamelCase = object_detector( '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ] , ) _UpperCamelCase = object_detector( [ { '''image''': 
'''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ] ] , ) @require_torch @slow def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: _UpperCamelCase = pipeline('''zero-shot-object-detection''' ) _UpperCamelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ] , ) _UpperCamelCase = object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ] , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], [ {'''score''': 0.2_8_6_8, '''label''': 
'''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: pass @require_torch @slow def _UpperCamelCase ( self : Union[str, Any] ) -> List[str]: _UpperCamelCase = 0.2 _UpperCamelCase = pipeline('''zero-shot-object-detection''' ) _UpperCamelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__UpperCamelCase , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, ] , ) @require_torch @slow def _UpperCamelCase ( self : Any ) -> Tuple: _UpperCamelCase = 2 _UpperCamelCase = pipeline('''zero-shot-object-detection''' ) _UpperCamelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__UpperCamelCase , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, ] , )
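# Illustrative sketch: the pipeline usage pattern exercised by the tests above,
# reduced to a minimal example. The checkpoint, image URL, and labels come from
# the tests; running this needs torch, the vision extras, and network access.
from transformers import pipeline

detector = pipeline(
    "zero-shot-object-detection",
    model="hf-internal-testing/tiny-random-owlvit-object-detection",
)
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.0,
)
# each entry: {"score": float, "label": str,
#              "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}
print(predictions[:3])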
256
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase = 13 , UpperCamelCase = 64 , UpperCamelCase = 2 , UpperCamelCase = 3 , UpperCamelCase = 3 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = 128 , UpperCamelCase=[16, 32, 64, 128] , UpperCamelCase = 7 , UpperCamelCase = 4 , UpperCamelCase = 37 , UpperCamelCase = "gelu" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 10 , UpperCamelCase = 0.02 , UpperCamelCase = 2 , UpperCamelCase = 1 , UpperCamelCase = 128 , UpperCamelCase = [2, 2, 2, 2] , UpperCamelCase = 2 , UpperCamelCase = 2 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = encoder_stride lowerCamelCase_ = num_attention_outputs lowerCamelCase_ = embed_dim lowerCamelCase_ = embed_dim + 1 lowerCamelCase_ = resolution lowerCamelCase_ = depths lowerCamelCase_ = hidden_sizes lowerCamelCase_ = dim lowerCamelCase_ = mlp_expansion_ratio def snake_case ( self ): """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def snake_case ( self ): """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" 
lowerCamelCase_ = TFEfficientFormerModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase ) lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFEfficientFormerModel, "image-classification": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEfficientFormerModelTester(self ) lowerCamelCase_ = ConfigTester( self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds" ) def snake_case ( self ): """simple docstring""" pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings" ) def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def snake_case ( self ): """simple docstring""" def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase ) lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) if hasattr(self.model_tester , 
"encoder_seq_length" ): lowerCamelCase_ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1: lowerCamelCase_ = seq_length * self.model_tester.chunk_length else: lowerCamelCase_ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: lowerCamelCase_ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCamelCase , (list, tuple) ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ): """simple docstring""" lowerCamelCase_ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFEfficientFormerModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "chunk_length" , UpperCamelCase ) if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ): lowerCamelCase_ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = True lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase ) lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase_ = True lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase ) lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def snake_case ( self ): """simple docstring""" # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model lowerCamelCase_ = model_class(UpperCamelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes lowerCamelCase_ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } lowerCamelCase_ = model(UpperCamelCase ) self.assertTrue(outputs_dict is not None ) def __snake_case ( ): lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self ): """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" ) if is_vision_available() else None ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="tf" ) # forward pass lowerCamelCase_ = model(**UpperCamelCase , training=UpperCamelCase ) # verify the logits lowerCamelCase_ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowerCamelCase_ = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="tf" ) # forward pass lowerCamelCase_ = model(**UpperCamelCase , training=UpperCamelCase ) # verify the logits lowerCamelCase_ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowerCamelCase_ = tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
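# Illustrative sketch: the inference path the slow integration tests above
# verify, as a standalone example. The checkpoint name and fixture path are
# taken from the tests; this needs TensorFlow, Pillow, and network access.
from PIL import Image

from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000) ImageNet logits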
55
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __magic_name__ = logging.get_logger(__name__) def _lowerCAmelCase ( UpperCamelCase_ ): if isinstance(_UpperCAmelCase , np.ndarray ): return list(tensor.shape ) __SCREAMING_SNAKE_CASE = tf.shape(_UpperCAmelCase ) if tensor.shape == tf.TensorShape(_UpperCAmelCase ): return dynamic __SCREAMING_SNAKE_CASE = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCAmelCase )] def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None ): return tf.nn.softmax(logits=logits + 1e-9 , axis=_UpperCAmelCase , name=_UpperCAmelCase ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1e-5 , UpperCamelCase_=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized __SCREAMING_SNAKE_CASE = tf.nn.moments(_UpperCAmelCase , axes=[axis] , keepdims=_UpperCAmelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __SCREAMING_SNAKE_CASE = [1] * inputs.shape.rank __SCREAMING_SNAKE_CASE = shape_list(_UpperCAmelCase )[axis] __SCREAMING_SNAKE_CASE = tf.reshape(_UpperCAmelCase , _UpperCAmelCase ) __SCREAMING_SNAKE_CASE = tf.reshape(_UpperCAmelCase , _UpperCAmelCase ) # Compute layer normalization using the batch_normalization # function. __SCREAMING_SNAKE_CASE = tf.nn.batch_normalization( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , offset=_UpperCAmelCase , scale=_UpperCAmelCase , variance_epsilon=_UpperCAmelCase , ) return outputs def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=0 , UpperCamelCase_=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __SCREAMING_SNAKE_CASE = tf.shape(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __SCREAMING_SNAKE_CASE = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(_UpperCAmelCase , _UpperCAmelCase ) def _lowerCAmelCase ( UpperCamelCase_ ): if not isinstance(_UpperCAmelCase , tf.Tensor ): __SCREAMING_SNAKE_CASE = tf.convert_to_tensor(_UpperCAmelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __SCREAMING_SNAKE_CASE = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __SCREAMING_SNAKE_CASE = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __SCREAMING_SNAKE_CASE = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = "input_ids" ): tf.debugging.assert_less( _UpperCAmelCase , tf.cast(_UpperCAmelCase , dtype=tensor.dtype ) , message=( f"The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCAmelCase )}) must be smaller than the embedding " f"layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ) , ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = 6_4512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __SCREAMING_SNAKE_CASE = [x for x in data if len(_UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) __SCREAMING_SNAKE_CASE = np.asarray(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = np.array_split(_UpperCAmelCase , _UpperCAmelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __SCREAMING_SNAKE_CASE = np.array_split(_UpperCAmelCase , _UpperCAmelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_UpperCAmelCase ): __SCREAMING_SNAKE_CASE = chunk_data else: __SCREAMING_SNAKE_CASE = data def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): if name in group.attrs: __SCREAMING_SNAKE_CASE = [n.decode("""utf8""" ) if hasattr(_UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]] else: __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(_UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def _lowerCAmelCase ( UpperCamelCase_ ): def _expand_single_ad_tensor(UpperCamelCase_ ): if isinstance(_UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_UpperCAmelCase , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , _UpperCAmelCase )
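# Illustrative sketch: the torch.flatten-style helper defined above, restated
# under a readable name of our own choosing and checked on a concrete shape.
# Same algorithm: collapse dims start_dim..end_dim into one via a dynamic
# reshape.
import tensorflow as tf


def flatten(t, start_dim: int = 0, end_dim: int = -1):
    if end_dim < 0:
        end_dim += t.shape.rank
    if start_dim < 0:
        start_dim += t.shape.rank
    if start_dim == end_dim:
        return t
    in_shape = tf.shape(t)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(t, out_shape)


x = tf.zeros((2, 3, 4, 5))
assert flatten(x, 1, 2).shape == (2, 12, 5)  # dims 1..2 collapsed, as in torch.flatten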
350
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @slow def snake_case_ ( self): __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""") __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/mt5-small""") __SCREAMING_SNAKE_CASE = tokenizer("""Hello there""" , return_tensors="""tf""").input_ids __SCREAMING_SNAKE_CASE = tokenizer("""Hi I am""" , return_tensors="""tf""").input_ids __SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , labels=lowerCAmelCase__).loss __SCREAMING_SNAKE_CASE = -tf.math.reduce_mean(lowerCAmelCase__).numpy() __SCREAMING_SNAKE_CASE = -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
255
0
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ :Any = logging.get_logger(__name__) lowerCAmelCase__ :List[str] = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCAmelCase__ :Tuple = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } lowerCAmelCase__ :Optional[Any] = {'''facebook/blenderbot_small-90M''': 5_1_2} def lowerCAmelCase__ ( a__: Optional[int] ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCAmelCase = char _UpperCAmelCase = set(a__ ) return pairs class __a ( UpperCAmelCase ): _a : Any = VOCAB_FILES_NAMES _a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="__start__" , _SCREAMING_SNAKE_CASE="__end__" , _SCREAMING_SNAKE_CASE="__unk__" , _SCREAMING_SNAKE_CASE="__null__" , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" super().__init__(unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = {v: k for k, v in self.encoder.items()} with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle: _UpperCAmelCase = merges_handle.read().split('\n' )[1:-1] _UpperCAmelCase = [tuple(merge.split() ) for merge in merges] _UpperCAmelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) ) _UpperCAmelCase = {} @property def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return len(self.encoder ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] _UpperCAmelCase = re.sub('([.,!?()])' , R' \1' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = re.sub('(\')' , R' \1 ' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = re.sub(R'\s{2,}' , ' ' , _SCREAMING_SNAKE_CASE ) if "\n" in token: _UpperCAmelCase = token.replace('\n' , ' __newln__' ) _UpperCAmelCase = token.split(' ' ) _UpperCAmelCase = [] for token in tokens: if not len(_SCREAMING_SNAKE_CASE ): continue _UpperCAmelCase = token.lower() _UpperCAmelCase = tuple(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) _UpperCAmelCase = get_pairs(_SCREAMING_SNAKE_CASE ) if not pairs: words.append(_SCREAMING_SNAKE_CASE ) continue while True: _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) ) if bigram not in 
self.bpe_ranks: break _UpperCAmelCase , _UpperCAmelCase = bigram _UpperCAmelCase = [] _UpperCAmelCase = 0 while i < len(_SCREAMING_SNAKE_CASE ): try: _UpperCAmelCase = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) new_word.extend(word[i:j] ) _UpperCAmelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _UpperCAmelCase = tuple(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = new_word if len(_SCREAMING_SNAKE_CASE ) == 1: break else: _UpperCAmelCase = get_pairs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '@@ '.join(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = word[:-4] _UpperCAmelCase = word words.append(_SCREAMING_SNAKE_CASE ) return " ".join(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = re.findall(R'\S+\n?' , _SCREAMING_SNAKE_CASE ) for token in words: split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) ) ) return split_tokens def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _UpperCAmelCase = token.lower() return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = ' '.join(_SCREAMING_SNAKE_CASE ).replace('@@ ' , '' ).strip() return out_string def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + '\n' ) _UpperCAmelCase = 0 with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _UpperCAmelCase = token_index writer.write(' '.join(_SCREAMING_SNAKE_CASE ) + '\n' ) index += 1 return vocab_file, merge_file
329
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was sucessfully converted!' 
) if __name__ == "__main__": lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCAmelCase__ :List[str] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
329
1
'''simple docstring'''

from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_0_0_0))

    import doctest

    doctest.testmod()
332
'''simple docstring'''

import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
332
1
'''simple docstring'''

from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # mark multiples of every odd i up to sqrt(limit) as composite
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
23
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" lowerCamelCase__ = 42 lowerCamelCase__ = 42 def __init__( self : Union[str, Any] , __snake_case : UNetaDModel , __snake_case : ScoreSdeVeScheduler ) -> int: super().__init__() self.register_modules(unet=__snake_case , scheduler=__snake_case ) @torch.no_grad() def __call__( self : Optional[int] , __snake_case : int = 1 , __snake_case : int = 2000 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , **__snake_case : Optional[int] , ) -> Union[ImagePipelineOutput, Tuple]: UpperCAmelCase : str = self.unet.config.sample_size UpperCAmelCase : Union[str, Any] = (batch_size, 3, img_size, img_size) UpperCAmelCase : int = self.unet UpperCAmelCase : Any = randn_tensor(__snake_case , generator=__snake_case ) * self.scheduler.init_noise_sigma UpperCAmelCase : List[Any] = sample.to(self.device ) self.scheduler.set_timesteps(__snake_case ) self.scheduler.set_sigmas(__snake_case ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCAmelCase : Any = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): UpperCAmelCase : Union[str, Any] = self.unet(__snake_case , __snake_case ).sample UpperCAmelCase : Optional[Any] = self.scheduler.step_correct(__snake_case , __snake_case , generator=__snake_case ).prev_sample # prediction step UpperCAmelCase : Optional[Any] = model(__snake_case , __snake_case ).sample UpperCAmelCase : List[str] = self.scheduler.step_pred(__snake_case , __snake_case , __snake_case , generator=__snake_case ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = output.prev_sample, output.prev_sample_mean UpperCAmelCase : int = sample_mean.clamp(0 , 1 ) UpperCAmelCase : Union[str, Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : Optional[Any] = self.numpy_to_pil(__snake_case ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__snake_case )
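# Illustrative sketch: driving a predictor-corrector pipeline like the one
# above. The checkpoint name is an assumption (a public score-SDE checkpoint),
# not taken from this file; sampling 2000 steps is slow without a GPU.
import torch

from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")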
23
1
'''simple docstring''' import re from filelock import FileLock try: import nltk snake_case__ : Union[str, Any] = True except (ImportError, ModuleNotFoundError): snake_case__ : Any = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def _lowerCamelCase ( lowerCamelCase_ : str ): """simple docstring""" lowerCamelCase_ = re.sub('<n>' , '' , lowerCamelCase_ ) # remove pegasus newline char (assign the result, otherwise the substitution is discarded) assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(lowerCamelCase_ ) )
359
'''simple docstring''' snake_case__ : str = '''Tobias Carryer''' from time import time class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ): # noqa: B008 '''simple docstring''' UpperCAmelCase_ : str = multiplier UpperCAmelCase_ : Dict = increment UpperCAmelCase_ : Tuple = modulo UpperCAmelCase_ : Dict = seed def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. snake_case__ : Any = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31) while True: print(lcg.next_number())
274
0
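The style-context row in this record implements a linear congruential generator. A self-contained sketch with readable names (an assumption; the row's own identifiers are placeholders):

from time import time

class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=None):
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        # seeding at call time avoids the row's mutable-default pitfall (noqa: B008)
        self.seed = int(time()) if seed is None else seed

    def next_number(self):
        # classic LCG recurrence: seed <- (a * seed + c) mod m
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed

lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)  # constants from the row
print([lcg.next_number() for _ in range(3)])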
import tensorflow as tf from ...tf_utils import shape_list class UpperCAmelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , A_ , A_ , A_ , A_ , A_=1 , A_=False , **A_ ) -> Union[str, Any]: super().__init__(**A_ ) __UpperCamelCase =vocab_size __UpperCamelCase =d_embed __UpperCamelCase =d_proj __UpperCamelCase =cutoffs + [vocab_size] __UpperCamelCase =[0] + self.cutoffs __UpperCamelCase =div_val __UpperCamelCase =self.cutoffs[0] __UpperCamelCase =len(self.cutoffs ) - 1 __UpperCamelCase =self.shortlist_size + self.n_clusters __UpperCamelCase =keep_order __UpperCamelCase =[] __UpperCamelCase =[] def _a ( self , A_ ) -> List[str]: if self.n_clusters > 0: __UpperCamelCase =self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=A_ , name='cluster_weight' ) __UpperCamelCase =self.add_weight( shape=(self.n_clusters,) , initializer='zeros' , trainable=A_ , name='cluster_bias' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: __UpperCamelCase =self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=A_ , name=f'out_projs_._{i}' , ) self.out_projs.append(A_ ) else: self.out_projs.append(A_ ) __UpperCamelCase =self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=A_ , name=f'out_layers_._{i}_._weight' , ) __UpperCamelCase =self.add_weight( shape=(self.vocab_size,) , initializer='zeros' , trainable=A_ , name=f'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): __UpperCamelCase , __UpperCamelCase =self.cutoff_ends[i], self.cutoff_ends[i + 1] __UpperCamelCase =self.d_embed // (self.div_val**i) __UpperCamelCase =self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=A_ , name=f'out_projs_._{i}' ) self.out_projs.append(A_ ) __UpperCamelCase =self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=A_ , name=f'out_layers_._{i}_._weight' , ) __UpperCamelCase =self.add_weight( shape=(r_idx - l_idx,) , initializer='zeros' , trainable=A_ , name=f'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) super().build(A_ ) @staticmethod def _a ( A_ , A_ , A_ , A_=None ) -> List[Any]: __UpperCamelCase =x if proj is not None: __UpperCamelCase =tf.einsum('ibd,ed->ibe' , A_ , A_ ) return tf.einsum('ibd,nd->ibn' , A_ , A_ ) + b @staticmethod def _a ( A_ , A_ ) -> Dict: __UpperCamelCase =shape_list(A_ ) __UpperCamelCase =tf.range(lp_size[0] , dtype=target.dtype ) __UpperCamelCase =tf.stack([r, target] , 1 ) return tf.gather_nd(A_ , A_ ) def _a ( self , A_ , A_ , A_=True , A_=False ) -> str: __UpperCamelCase =0 if self.n_clusters == 0: __UpperCamelCase =self._logit(A_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: __UpperCamelCase =tf.nn.sparse_softmax_cross_entropy_with_logits(labels=A_ , logits=A_ ) __UpperCamelCase =tf.nn.log_softmax(A_ , axis=-1 ) else: __UpperCamelCase =shape_list(A_ ) __UpperCamelCase =[] __UpperCamelCase =tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): __UpperCamelCase , __UpperCamelCase =self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: __UpperCamelCase =(target >= l_idx) & (target < r_idx) __UpperCamelCase =tf.where(A_ ) __UpperCamelCase =tf.boolean_mask(A_ , A_ ) - l_idx if self.div_val == 1: __UpperCamelCase =self.out_layers[0][0][l_idx:r_idx] __UpperCamelCase =self.out_layers[0][1][l_idx:r_idx] else: __UpperCamelCase 
=self.out_layers[i][0] __UpperCamelCase =self.out_layers[i][1] if i == 0: __UpperCamelCase =tf.concat([cur_W, self.cluster_weight] , 0 ) __UpperCamelCase =tf.concat([cur_b, self.cluster_bias] , 0 ) __UpperCamelCase =self._logit(A_ , A_ , A_ , self.out_projs[0] ) __UpperCamelCase =tf.nn.log_softmax(A_ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: __UpperCamelCase =tf.boolean_mask(A_ , A_ ) __UpperCamelCase =self._gather_logprob(A_ , A_ ) else: __UpperCamelCase =self._logit(A_ , A_ , A_ , self.out_projs[i] ) __UpperCamelCase =tf.nn.log_softmax(A_ ) __UpperCamelCase =self.cutoffs[0] + i - 1 # No probability for the head cluster __UpperCamelCase =head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(A_ ) if target is not None: __UpperCamelCase =tf.boolean_mask(A_ , A_ ) __UpperCamelCase =tf.boolean_mask(A_ , A_ ) __UpperCamelCase =self._gather_logprob(A_ , A_ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(A_ , -cur_logprob , shape_list(A_ ) ) __UpperCamelCase =tf.concat(A_ , axis=-1 ) if target is not None: if return_mean: __UpperCamelCase =tf.reduce_mean(A_ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(A_ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(A_ , name=self.name , aggregation='mean' if return_mean else '' ) return out
62
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _lowercase : Tuple = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = ['pixel_values'] def __init__( self : Optional[Any], lowerCamelCase : bool = True, lowerCamelCase : Union[int, float] = 1 / 255, lowerCamelCase : bool = True, lowerCamelCase : int = 8, **lowerCamelCase : Tuple, )-> None: super().__init__(**lowerCamelCase ) lowerCamelCase__ : int =do_rescale lowerCamelCase__ : Dict =rescale_factor lowerCamelCase__ : Union[str, Any] =do_pad lowerCamelCase__ : Union[str, Any] =pad_size def snake_case ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : float, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : int )-> np.ndarray: return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Optional[Any], lowerCamelCase : np.ndarray, lowerCamelCase : int, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None )-> List[Any]: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_image_size(lowerCamelCase ) lowerCamelCase__ : List[str] =(old_height // size + 1) * size - old_height lowerCamelCase__ : List[str] =(old_width // size + 1) * size - old_width return pad(lowerCamelCase, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : ImageInput, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[float] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST, **lowerCamelCase : Union[str, Any], )-> Dict: lowerCamelCase__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ : str =do_pad if do_pad is not None else self.do_pad lowerCamelCase__ : int =pad_size if pad_size is not None else self.pad_size lowerCamelCase__ : Optional[int] =make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowerCamelCase__ : Tuple =[to_numpy_array(lowerCamelCase ) for image in images] if do_rescale: lowerCamelCase__ : Tuple =[self.rescale(image=lowerCamelCase, scale=lowerCamelCase ) for image in images] if do_pad: lowerCamelCase__ : Tuple =[self.pad(lowerCamelCase, size=lowerCamelCase ) for image in images] lowerCamelCase__ : int =[to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images] lowerCamelCase__ : Dict ={'''pixel_values''': images} return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
238
0
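The style-context row's pad step grows an image to the next multiple of pad_size with mirror padding. A NumPy-only sketch of that rule (the function and variable names are assumed):

import numpy as np

def pad_to_multiple(image, size=8):
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    # mirror-pad on the bottom/right only, as in the row
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")

print(pad_to_multiple(np.zeros((30, 45))).shape)  # (32, 48)

Note that, as written in the row, a dimension that is already a multiple of size still gains a full extra block (e.g. 32 -> 40).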
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> tuple[float, float]: '''simple docstring''' if not len(__UpperCAmelCase ) == len(__UpperCAmelCase ) == 3: raise ValueError('''Please enter a valid equation.''' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('''Both a & b of two equations can\'t be zero.''' ) # Extract the coefficients snake_case_ ,snake_case_ ,snake_case_ = equationa snake_case_ ,snake_case_ ,snake_case_ = equationa # Calculate the determinants of the matrices snake_case_ = aa * ba - aa * ba snake_case_ = ca * ba - ca * ba snake_case_ = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('''Infinite solutions. (Consistent system)''' ) else: raise ValueError('''No solution. (Inconsistent system)''' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: snake_case_ = determinant_x / determinant snake_case_ = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
72
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' if exponent == 1: return base if exponent % 2 == 0: snake_case_ = _modexpt(__UpperCAmelCase, exponent // 2, __UpperCAmelCase ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(__UpperCAmelCase, exponent - 1, __UpperCAmelCase )) % modulo_value def __magic_name__ ( __UpperCAmelCase = 1777, __UpperCAmelCase = 1855, __UpperCAmelCase = 8 ) -> int: '''simple docstring''' snake_case_ = base for _ in range(1, __UpperCAmelCase ): snake_case_ = _modexpt(__UpperCAmelCase, __UpperCAmelCase, 10**digits ) return result if __name__ == "__main__": print(f'''{solution() = }''')
72
1
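The first row of this record solves a 2x2 linear system by Cramer's rule. A runnable sketch, with each equation given as (a, b, c) for a*x + b*y = c (the tuple layout and names are assumptions):

def solve_cramer(eq1, eq2):
    a1, b1, c1 = eq1
    a2, b2, c2 = eq2
    det = a1 * b2 - a2 * b1       # determinant of the coefficient matrix
    det_x = c1 * b2 - c2 * b1     # x-numerator: constants replace the x column
    det_y = a1 * c2 - a2 * c1     # y-numerator: constants replace the y column
    if det == 0:
        raise ValueError("No unique solution")
    return (det_x / det, det_y / det)

print(solve_cramer((2, 3, 7), (5, 1, 11)))  # (2.0, 1.0)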
# Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def lowerCAmelCase_ ( __a ) -> Union[str, Any]: """simple docstring""" return 1 / (1 + np.exp(-z )) def lowerCAmelCase_ ( __a , __a ) -> Dict: """simple docstring""" return (-y * np.log(__a ) - (1 - y) * np.log(1 - h )).mean() def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Union[str, Any] =np.dot(__a , __a ) return np.sum(y * scores - np.log(1 + np.exp(__a ) ) ) def lowerCAmelCase_ ( __a , __a , __a , __a=70000 ) -> List[str]: """simple docstring""" lowerCamelCase__: Optional[Any] =np.zeros(x.shape[1] ) for iterations in range(__a ): lowerCamelCase__: Tuple =np.dot(__a , __a ) lowerCamelCase__: Union[str, Any] =sigmoid_function(__a ) lowerCamelCase__: Union[str, Any] =np.dot(x.T , h - y ) / y.size lowerCamelCase__: Optional[Any] =theta - alpha * gradient # updating the weights lowerCamelCase__: List[str] =np.dot(__a , __a ) lowerCamelCase__: List[str] =sigmoid_function(__a ) lowerCamelCase__: str =cost_function(__a , __a ) if iterations % 100 == 0: print(F"""loss: {j} \t""" ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": __A = datasets.load_iris() __A = iris.data[:, :2] __A = (iris.target != 0) * 1 __A = 0.1 __A = logistic_reg(alpha, x, y, max_iterations=7_0000) print("theta: ", theta) # printing the theta i.e our weights vector def lowerCAmelCase_ ( __a ) -> Union[str, Any]: """simple docstring""" return sigmoid_function( np.dot(__a , __a ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0") plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1") ((__A) , (__A)) = (x[:, 0].min(), x[:, 0].max()) ((__A) , (__A)) = (x[:, 1].min(), x[:, 1].max()) ((__A) , (__A)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) __A = np.c_[xxa.ravel(), xxa.ravel()] __A = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black") plt.legend() plt.show()
10
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A : List[str] = logging.get_logger(__name__) _A : List[str] = {"""vocab_file""": """spiece.model"""} _A : List[Any] = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), } } _A : str = { """google/bigbird-roberta-base""": 40_96, """google/bigbird-roberta-large""": 40_96, """google/bigbird-base-trivia-itc""": 40_96, } class a__ ( a_ ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] __lowerCAmelCase = [] def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ): lowercase : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token lowercase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) lowercase : Optional[Any] = vocab_file lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @property def __magic_name__ ( self ): return self.sp_model.get_piece_size() def __magic_name__ ( self ): lowercase : str = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): lowercase : Union[str, Any] = self.__dict__.copy() lowercase : Dict = None return state def __setstate__( self , _a ): lowercase : str = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase : List[str] = {} lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__ ( self , _a ): return self.sp_model.encode(_a , out_type=_a ) def __magic_name__ ( self , _a ): return self.sp_model.piece_to_id(_a ) def __magic_name__ ( self , _a ): lowercase : Union[str, Any] = self.sp_model.IdToPiece(_a ) return token def __magic_name__ ( self , _a ): lowercase : List[Any] = [] lowercase : List[Any] = "" lowercase : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token lowercase : int = True lowercase : Union[str, Any] = [] else: current_sub_tokens.append(_a ) lowercase : int = False out_string += self.sp_model.decode(_a ) return out_string.strip() def __magic_name__ ( self , _a , _a = False , _a = None , _a = True , **_a , ): lowercase : int = kwargs.pop("use_source_tokenizer" , _a ) lowercase : Union[str, Any] = self.convert_ids_to_tokens(_a , skip_special_tokens=_a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowercase : Any = [] lowercase : Dict = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) lowercase : int = [] sub_texts.append(_a ) else: current_sub_text.append(_a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: lowercase : Dict = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(_a ) ) else: lowercase : Union[str, Any] = "".join(_a ) lowercase : int = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowercase : Union[str, Any] = self.clean_up_tokenization(_a ) return clean_text else: return text def __magic_name__ ( self , _a , _a = None ): if not os.path.isdir(_a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase : List[Any] = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , "wb" ) as fi: lowercase : int = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def __magic_name__ ( self , _a , _a = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase : List[str] = [self.cls_token_id] lowercase : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __magic_name__ ( self , _a , _a = None , _a = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def __magic_name__ ( self , _a , _a = None ): lowercase : Tuple = [self.sep_token_id] lowercase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
202
0
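The first row of this record trains logistic regression by plain gradient descent. A compact runnable version on toy data (all names here are illustrative):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def fit_logistic(x, y, alpha=0.1, iterations=5000):
    theta = np.zeros(x.shape[1])
    for _ in range(iterations):
        h = sigmoid(x @ theta)              # predicted probabilities
        gradient = x.T @ (h - y) / y.size   # gradient of the mean log-loss
        theta -= alpha * gradient
    return theta

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 2))
y = (x[:, 0] + x[:, 1] > 0).astype(float)
print(fit_logistic(x, y))  # two positive weights of similar magnitude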
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def SCREAMING_SNAKE_CASE ( ) -> int: a = argparse.ArgumentParser() parser.add_argument( "-m" , "--pretrained_model_name_or_path" , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , ) parser.add_argument( "-c" , "--caption" , type=__UpperCamelCase , default="robotic cat with wings" , help="Text used to generate images." , ) parser.add_argument( "-n" , "--images_num" , type=__UpperCamelCase , default=4 , help="How much images to generate." , ) parser.add_argument( "-s" , "--seed" , type=__UpperCamelCase , default=42 , help="Seed for random process." , ) parser.add_argument( "-ci" , "--cuda_id" , type=__UpperCamelCase , default=0 , help="cuda_id." , ) a = parser.parse_args() return args def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> Dict: if not len(__UpperCamelCase) == rows * cols: raise ValueError("The specified number of rows and columns are not correct.") a , a = imgs[0].size a = Image.new("RGB" , size=(cols * w, rows * h)) a , a = grid.size for i, img in enumerate(__UpperCamelCase): grid.paste(__UpperCamelCase , box=(i % cols * w, i // cols * h)) return grid def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase="robotic cat with wings" , __UpperCamelCase=7.5 , __UpperCamelCase=50 , __UpperCamelCase=1 , __UpperCamelCase=42 , ) -> List[Any]: a = torch.Generator(pipeline.device).manual_seed(__UpperCamelCase) a = pipeline( __UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=__UpperCamelCase , generator=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , ).images a = int(math.sqrt(__UpperCamelCase)) a = image_grid(__UpperCamelCase , rows=_rows , cols=num_images_per_prompt // _rows) return grid, images lowercase__ : List[str] = parse_args() # Load models and create wrapper for stable diffusion lowercase__ : Tuple = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") lowercase__ : List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") lowercase__ : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") lowercase__ : Optional[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") lowercase__ : Tuple = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) lowercase__ : List[Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")): lowercase__ : int = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, "unet", unet) else: lowercase__ : Union[str, Any] = unet.to(torch.device("cuda", args.cuda_id)) lowercase__ : List[str] = pipeline.to(unet.device) lowercase__ , lowercase__ : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split())))) lowercase__ : Optional[Any] = os.path.join(args.pretrained_model_name_or_path, 
"_".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
180
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(UpperCamelCase__ ) , """Tatoeba directory does not exist.""" ) class a__ ( unittest.TestCase ): @cached_property def lowerCAmelCase_ ( self ) -> List[str]: '''simple docstring''' a = tempfile.mkdtemp() return TatoebaConverter(save_dir=A ) @slow def lowerCAmelCase_ ( self ) -> int: '''simple docstring''' self.resolver.convert_models(["heb-eng"] ) @slow def lowerCAmelCase_ ( self ) -> Dict: '''simple docstring''' a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=A ) assert mmeta["long_pair"] == "heb-eng"
180
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) UpperCAmelCase__ = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], "processing_speech_to_text": ["Speech2TextProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["Speech2TextTokenizer"] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["Speech2TextFeatureExtractor"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
import math def _a ( a :int ) -> list: a = [True] * n a = False a = False a = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): a = i * 2 while index < n: a = False a = index + i a = [2] for i in range(3 , a , 2 ): if is_prime[i]: primes.append(a ) return primes def _a ( a :int = 999_966_663_333 ) -> int: a = math.floor(math.sqrt(a ) ) + 100 a = prime_sieve(a ) a = 0 a = 0 a = primes[prime_index] while (last_prime**2) <= limit: a = primes[prime_index + 1] a = last_prime**2 a = next_prime**2 # Get numbers divisible by lps(current) a = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) a = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps a = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair a = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
0
1
from math import isqrt, loga def lowerCAmelCase_ ( _snake_case : Optional[Any] ) -> list[int]: '''simple docstring''' __magic_name__ : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): __magic_name__ : Any = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def lowerCAmelCase_ ( _snake_case : Any = 800800 , _snake_case : str = 800800 ) -> int: '''simple docstring''' __magic_name__ : Union[str, Any] = degree * loga(__snake_case ) __magic_name__ : Optional[Any] = int(__snake_case ) __magic_name__ : str = calculate_prime_numbers(__snake_case ) __magic_name__ : int = 0 __magic_name__ : Optional[int] = 0 __magic_name__ : List[Any] = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"{solution() = }")
361
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : Dict = logging.get_logger(__name__) snake_case : Optional[int] = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class _snake_case ( snake_case ): UpperCamelCase__ = 'donut-swin' UpperCamelCase__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , _a=224 , _a=4 , _a=3 , _a=96 , _a=[2, 2, 6, 2] , _a=[3, 6, 12, 24] , _a=7 , _a=4.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=0.02 , _a=1e-5 , **_a , ): super().__init__(**_a ) __magic_name__ : Optional[int] = image_size __magic_name__ : Any = patch_size __magic_name__ : Tuple = num_channels __magic_name__ : Dict = embed_dim __magic_name__ : Dict = depths __magic_name__ : int = len(_a ) __magic_name__ : str = num_heads __magic_name__ : Tuple = window_size __magic_name__ : Dict = mlp_ratio __magic_name__ : List[str] = qkv_bias __magic_name__ : Any = hidden_dropout_prob __magic_name__ : str = attention_probs_dropout_prob __magic_name__ : Union[str, Any] = drop_path_rate __magic_name__ : List[Any] = hidden_act __magic_name__ : List[Any] = use_absolute_embeddings __magic_name__ : Union[str, Any] = layer_norm_eps __magic_name__ : Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __magic_name__ : Tuple = int(embed_dim * 2 ** (len(_a ) - 1) )
41
0
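The first row of this record counts hybrid integers p^q * q^p below 800800^800800; the key trick is comparing in log space so no huge integers are ever built. A sketch of just that inequality (names are assumed; p and q should be distinct primes):

from math import log2

BOUND = 800800 * log2(800800)  # log2 of 800800^800800

def within_bound(p, q):
    # p**q * q**p <= 800800**800800  <=>  q*log2(p) + p*log2(q) <= BOUND
    return q * log2(p) + p * log2(q) <= BOUND

print(within_bound(2, 3), within_bound(999979, 999983))  # True False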
from ..utils import DummyObject, requires_backends class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Tuple: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->List[str]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->str: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->str: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Optional[int]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->str: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Optional[int]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Any: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->List[str]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->List[Any]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->List[str]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->List[str]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->List[Any]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Tuple: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Any: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase 
(metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Optional[int]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Any: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Optional[Any]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Dict: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->str: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Optional[int]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->Optional[Any]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->List[str]: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] ) class __lowercase (metaclass=_UpperCAmelCase ): _UpperCamelCase = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' requires_backends(self , ['''sentencepiece'''] )
275
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __lowercase (_UpperCAmelCase ): _UpperCamelCase = 42 _UpperCamelCase = 42 _UpperCamelCase = None class __lowercase (_UpperCAmelCase , _UpperCAmelCase ): _UpperCamelCase = 2 @register_to_config def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.007 , A_ = 80 , A_ = 0.05 , A_ = 50 , ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[int] = sigma_max # setable values __lowerCAmelCase : int = None __lowerCAmelCase : np.IntTensor = None __lowerCAmelCase : torch.FloatTensor = None # sigma(t_i) def UpperCamelCase__ ( self , A_ , A_ = None ) ->torch.FloatTensor: '''simple docstring''' return sample def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]: '''simple docstring''' __lowerCAmelCase : str = num_inference_steps __lowerCAmelCase : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy() __lowerCAmelCase : Optional[Any] = torch.from_numpy(A_ ).to(A_ ) __lowerCAmelCase : Tuple = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __lowerCAmelCase : Optional[int] = torch.tensor(A_ , dtype=torch.floataa , device=A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ = None ) ->Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: __lowerCAmelCase : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: __lowerCAmelCase : List[str] = 0 # sample eps ~ N(0, S_noise^2 * I) __lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device ) __lowerCAmelCase : str = sigma + gamma * sigma __lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output __lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat __lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A_ , derivative=A_ , pred_original_sample=A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]: '''simple docstring''' __lowerCAmelCase : str = sample_prev + sigma_prev * model_output __lowerCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev __lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A_ , derivative=A_ , pred_original_sample=A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any: '''simple docstring''' raise NotImplementedError()
275
1
'''simple docstring''' from __future__ import annotations def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): # Checks if the entire collection has been sorted if len(UpperCamelCase__ ) <= 1 or n <= 1: return insert_next(UpperCamelCase__ , n - 1 ) rec_insertion_sort(UpperCamelCase__ , n - 1 ) def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): # Checks order between adjacent elements if index >= len(UpperCamelCase__ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order UpperCAmelCase__ : Optional[Any] = ( collection[index], collection[index - 1], ) insert_next(UpperCamelCase__ , index + 1 ) if __name__ == "__main__": __A =input('Enter integers separated by spaces: ') __A =[int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
365
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _snake_case : lowerCAmelCase :str = PegasusConfig lowerCAmelCase :Optional[int] = {} lowerCAmelCase :Dict = '''gelu''' def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=40 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , ): UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : Any = batch_size UpperCAmelCase__ : Union[str, Any] = seq_length UpperCAmelCase__ : str = is_training UpperCAmelCase__ : Dict = use_labels UpperCAmelCase__ : Optional[int] = vocab_size UpperCAmelCase__ : int = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : Any = intermediate_size UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = eos_token_id UpperCAmelCase__ : Any = pad_token_id UpperCAmelCase__ : Dict = bos_token_id def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) UpperCAmelCase__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) UpperCAmelCase__ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1) UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase__ : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase__ : Union[str, Any] = prepare_pegasus_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return config, inputs_dict def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Optional[int] = TFPegasusModel(config=_lowerCamelCase).get_decoder() UpperCAmelCase__ : Optional[int] = inputs_dict["""input_ids"""] UpperCAmelCase__ : Any = input_ids[:1, :] UpperCAmelCase__ : List[str] = inputs_dict["""attention_mask"""][:1, :] UpperCAmelCase__ : int = inputs_dict["""head_mask"""] UpperCAmelCase__ : int = 1 # first forward pass UpperCAmelCase__ : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase , 
head_mask=_lowerCamelCase , use_cache=_lowerCamelCase) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size) UpperCAmelCase__ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and UpperCAmelCase__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1) UpperCAmelCase__ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1) UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase)[0] UpperCAmelCase__ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice UpperCAmelCase__ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1])) UpperCAmelCase__ : int = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1e-3) def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ): if attention_mask is None: UpperCAmelCase__ : Any = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase__ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase__ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case ( a__ , a__ , unittest.TestCase ): lowerCAmelCase :str = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () lowerCAmelCase :Any = (TFPegasusForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase :List[str] = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase :int = True lowerCAmelCase :Optional[int] = False lowerCAmelCase :Dict = False def snake_case__ ( self): UpperCAmelCase__ : List[str] = TFPegasusModelTester(self) UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_lowerCamelCase) def snake_case__ ( self): self.config_tester.run_common_tests() def snake_case__ ( self): UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase) @require_sentencepiece @require_tokenizers @require_tf class 
_snake_case ( unittest.TestCase ): lowerCAmelCase :Dict = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] lowerCAmelCase :Any = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers lowerCAmelCase :Tuple = '''google/pegasus-xsum''' @cached_property def snake_case__ ( self): return AutoTokenizer.from_pretrained(self.model_name) @cached_property def snake_case__ ( self): UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def snake_case__ ( self , **_lowerCamelCase): UpperCAmelCase__ : Dict = self.translate_src_text(**_lowerCamelCase) assert self.expected_text == generated_words def snake_case__ ( self , **_lowerCamelCase): UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , **_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""tf""") UpperCAmelCase__ : Dict = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_lowerCamelCase , ) UpperCAmelCase__ : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowerCamelCase) return generated_words @slow def snake_case__ ( self): self._assert_generated_batch_equal_expected()
283
0
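The first row of this record sorts by mutual recursion (rec_insertion_sort / insert_next), which recurses once per element and so hits Python's recursion limit on long inputs. An iterative equivalent for comparison (names here are my own):

def insertion_sort(collection):
    for i in range(1, len(collection)):
        j = i
        # swap backwards until collection[:i + 1] is in order
        while j > 0 and collection[j - 1] > collection[j]:
            collection[j - 1], collection[j] = collection[j], collection[j - 1]
            j -= 1
    return collection

print(insertion_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]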
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): __lowercase ,__lowercase : str = coefficient_matrix.shape __lowercase ,__lowercase : Union[str, Any] = constant_matrix.shape if rowsa != colsa: __lowercase : Optional[int] = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(_UpperCAmelCase ) if colsa != 1: __lowercase : str = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(_UpperCAmelCase ) if rowsa != rowsa: __lowercase : Optional[int] = ( '''Coefficient and constant matrices dimensions must be nxn and nx1 but ''' f"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(_UpperCAmelCase ) if len(_UpperCAmelCase ) != rowsa: __lowercase : Dict = ( '''Number of initial values must be equal to number of rows in coefficient ''' f"""matrix but received {len(_UpperCAmelCase )} and {rowsa}""" ) raise ValueError(_UpperCAmelCase ) if iterations <= 0: raise ValueError('''Iterations must be at least 1''' ) __lowercase : Dict = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) __lowercase ,__lowercase : Optional[int] = table.shape strictly_diagonally_dominant(_UpperCAmelCase ) # Iterates the whole matrix for given number of times for _ in range(_UpperCAmelCase ): __lowercase : Optional[Any] = [] for row in range(_UpperCAmelCase ): __lowercase : Union[str, Any] = 0 for col in range(_UpperCAmelCase ): if col == row: __lowercase : Optional[Any] = table[row][col] elif col == cols - 1: __lowercase : Dict = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] __lowercase : List[Any] = (temp + val) / denom new_val.append(_UpperCAmelCase ) __lowercase : List[Any] = new_val return [float(_UpperCAmelCase ) for i in new_val] def __UpperCAmelCase ( __UpperCamelCase ): __lowercase ,__lowercase : Optional[int] = table.shape __lowercase : List[str] = True for i in range(0 , _UpperCAmelCase ): __lowercase : List[Any] = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
249
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
339
0
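A minimal usage sketch for the Jacobi solver above, assuming the cleaned-up names (jacobi_iteration_method takes the coefficient matrix, the constants column, an initial guess, and an iteration count):

import numpy as np

# A strictly diagonally dominant 2x2 system:
#   4x + 1y = 2
#   1x + 3y = -6
coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])

# Starting from [0, 0], a few dozen sweeps converge to the true solution.
solution = jacobi_iteration_method(coefficient, constant, init_val=[0.0, 0.0], iterations=50)
print(solution)  # approximately [1.09, -2.36]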
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
370
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n    author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n    title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n    booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n    month     = {November},\n    year      = {2020},\n    address   = {Online},\n    publisher = {Association for Computational Linguistics},\n    pages     = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n    title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n    author = \"Rei, Ricardo  and\n      Stewart, Craig  and\n      Farinha, Ana C  and\n      Lavie, Alon\",\n    booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n    month = nov,\n    year = \"2020\",\n    address = \"Online\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n    pages = \"2685--2702\",\n}\n"

_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that year's competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n    `scores`: List of scores.\n\nExamples:\n\n    >>> comet_metric = datasets.load_metric('comet')\n    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n    >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n    >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n    >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [0.19, 0.92]\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
282
0
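Both __init__.py files above lean on the _LazyModule pattern to defer heavy imports until a symbol is actually touched. A simplified, self-contained sketch of the idea (not the real transformers implementation; ToyLazyModule and the stdlib demo are purely illustrative):

import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the module that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        # Called only when normal lookup fails: import lazily on first access.
        if attr in self._symbol_to_module:
            module = importlib.import_module(self._symbol_to_module[attr])
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache, so the import happens only once
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


lazy = ToyLazyModule("toy", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(2.0))         # math is imported only here
print(lazy.dumps({"a": 1}))   # json is imported only here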
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
182
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
133
0
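A short usage sketch for the DepthEstimationPipeline above; the checkpoint name and image URL are the usual documentation examples, so treat them as swappable assumptions rather than requirements:

from transformers import pipeline

# Instantiate the pipeline with a depth-estimation checkpoint.
depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")

# Accepts a local path, a URL, or a PIL.Image; returns the raw tensor and a PIL depth map.
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["predicted_depth"].shape)  # torch.Size([1, H, W]) at the model's internal resolution
outputs["depth"].save("depth.png")       # grayscale visualization, resized to the input image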
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head (a single linear layer) mapping hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
355
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
19
0
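A quick forward-pass check for the ClassificationHead module above; the sizes are hypothetical and chosen only for illustration:

import torch

# Hypothetical sizes: 4 classes over 768-dimensional hidden states.
head = ClassificationHead(class_size=4, embed_size=768)

hidden = torch.randn(2, 768)      # batch of two pooled hidden states
logits = head(hidden)             # shape (2, 4)
probs = torch.softmax(logits, dim=-1)
print(probs.sum(dim=-1))          # each row sums to 1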
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = RoCBertTokenizer UpperCAmelCase : int = None UpperCAmelCase : Tuple = False UpperCAmelCase : Tuple = True UpperCAmelCase : Tuple = filter_non_english def lowerCAmelCase_ ( self : Union[str, Any] ): super().setUp() _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd'] _A = {} _A = {} for i, value in enumerate(_UpperCAmelCase ): _A = i _A = i _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer: json.dump(_UpperCAmelCase , _UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer: json.dump(_UpperCAmelCase , _UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) _A = tokenizer.tokenize('你好[SEP]你是谁' ) self.assertListEqual(_UpperCAmelCase , ['你', '好', '[SEP]', '你', '是', '谁'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) def lowerCAmelCase_ ( self : Dict ): _A = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : Optional[int] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase_ ( self : Tuple ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : int ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : Dict ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : str ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _A = {} for i, token in enumerate(_UpperCAmelCase ): _A = i _A = RoCBertWordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def lowerCAmelCase_ ( self : Optional[Any] ): self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' 
) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def lowerCAmelCase_ ( self : Tuple ): _A = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) if self.test_rust_tokenizer: _A = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) def lowerCAmelCase_ ( self : Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _A = tokenizer_r.encode_plus( _UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , ) _A = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase , 'do_lower_case' ) else False _A = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def lowerCAmelCase_ ( self : int ): _A = ['的', '人', '有'] _A = ''.join(_UpperCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = True _A = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase ) _A = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = False _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase ) _A = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". 
_A = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase ) ] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): _A = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) _A = tokenizer.encode('你好' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.encode('你是谁' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowerCAmelCase_ ( self : Optional[int] ): _A = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = '你好,你是谁' _A = tokenizer.tokenize(_UpperCAmelCase ) _A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) _A = tokenizer.convert_tokens_to_shape_ids(_UpperCAmelCase ) _A = tokenizer.convert_tokens_to_pronunciation_ids(_UpperCAmelCase ) _A = tokenizer.prepare_for_model( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
315
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
315
1
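The greedy wordpiece behavior exercised by the RoCBert test above can be reproduced directly with a toy vocabulary; this sketch mirrors the fixture used in the test itself:

from transformers.models.roc_bert.tokenization_roc_bert import RoCBertWordpieceTokenizer

# Toy vocabulary, copied from the test fixture above.
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {token: i for i, token in enumerate(vocab_tokens)}

tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(tokenizer.tokenize("unwanted running"))   # ['un', '##want', '##ed', 'runn', '##ing']
print(tokenizer.tokenize("unwantedX running"))  # ['[UNK]', 'runn', '##ing']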
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase : Any = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCamelCase : Any = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCamelCase : Optional[Any] = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCamelCase : int = { "facebook/dpr-ctx_encoder-single-nq-base": 5_1_2, "facebook/dpr-ctx_encoder-multiset-base": 5_1_2, } UpperCamelCase : Optional[int] = { "facebook/dpr-question_encoder-single-nq-base": 5_1_2, "facebook/dpr-question_encoder-multiset-base": 5_1_2, } UpperCamelCase : Optional[Any] = { "facebook/dpr-reader-single-nq-base": 5_1_2, "facebook/dpr-reader-multiset-base": 5_1_2, } UpperCamelCase : Tuple = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } UpperCamelCase : Any = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } UpperCamelCase : str = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION 
lowercase = DPRContextEncoderTokenizer class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase = DPRQuestionEncoderTokenizer UpperCamelCase : str = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) UpperCamelCase : Union[str, Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) UpperCamelCase : Tuple = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase : def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if titles is None and texts is None: return super().__call__( __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) elif titles is None or texts is None: __UpperCamelCase = titles if texts is None else texts return super().__call__( __UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = titles if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [titles] __UpperCamelCase = texts if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [texts] __UpperCamelCase = len(__UpperCAmelCase ) __UpperCamelCase = questions if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [questions] * n_passages assert len(__UpperCAmelCase ) == len( __UpperCAmelCase ), F'There should be as many titles than texts but got {len(__UpperCAmelCase )} titles and {len(__UpperCAmelCase )} texts.' 
__UpperCamelCase = super().__call__(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )['input_ids'] __UpperCamelCase = super().__call__(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )['input_ids'] __UpperCamelCase = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__UpperCAmelCase , __UpperCAmelCase ) ] } if return_attention_mask is not False: __UpperCamelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __UpperCamelCase = attention_mask return self.pad(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 16 , __UpperCAmelCase = 64 , __UpperCAmelCase = 4 , ): '''simple docstring''' __UpperCamelCase = reader_input['input_ids'] __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = reader_output[:3] __UpperCamelCase = len(__UpperCAmelCase ) __UpperCamelCase = sorted(range(__UpperCAmelCase ) , reverse=__UpperCAmelCase , key=relevance_logits.__getitem__ ) __UpperCamelCase = [] for doc_id in sorted_docs: __UpperCamelCase = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __UpperCamelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __UpperCamelCase = sequence_ids.index(self.pad_token_id ) else: __UpperCamelCase = len(__UpperCAmelCase ) __UpperCamelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCAmelCase , top_spans=__UpperCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCAmelCase , start_index=__UpperCAmelCase , end_index=__UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__UpperCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = [] for start_index, start_score in enumerate(__UpperCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __UpperCamelCase = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[1] , reverse=__UpperCAmelCase ) __UpperCamelCase = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]' __UpperCamelCase = end_index - start_index + 1 assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue 
chosen_span_intervals.append((start_index, end_index) ) if len(__UpperCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = READER_PRETRAINED_VOCAB_FILES_MAP lowercase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = READER_PRETRAINED_INIT_CONFIGURATION lowercase = ["input_ids", "attention_mask"] lowercase = DPRReaderTokenizer
370
"""simple docstring""" def A ( snake_case :list[list[int]] , snake_case :int , snake_case :int , snake_case :list[int] ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def A ( snake_case :list[list[int]] , snake_case :list[int] , snake_case :int ) -> bool: # Base Case if curr_ind == len(snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(snake_case ) ): if valid_connection(snake_case , snake_case , snake_case , snake_case ): # Insert current vertex into path as next transition __UpperCamelCase = next_ver # Validate created path if util_hamilton_cycle(snake_case , snake_case , curr_ind + 1 ): return True # Backtrack __UpperCamelCase = -1 return False def A ( snake_case :list[list[int]] , snake_case :int = 0 ) -> list[int]: __UpperCamelCase = [-1] * (len(snake_case ) + 1) # initialize start and end of path with starting index __UpperCamelCase = __UpperCamelCase = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(snake_case , snake_case , 1 ) else []
263
0
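A usage sketch for the hamilton_cycle helper above, on a small graph that does contain a Hamiltonian cycle:

# A 5-vertex graph (adjacency matrix) with a Hamiltonian cycle.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]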
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput _UpperCamelCase: List[str] = 'scheduler_config.json' class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = 1 _lowerCamelCase = 2 _lowerCamelCase = 3 _lowerCamelCase = 4 _lowerCamelCase = 5 _lowerCamelCase = 6 _lowerCamelCase = 7 _lowerCamelCase = 8 _lowerCamelCase = 9 _lowerCamelCase = 10 _lowerCamelCase = 11 _lowerCamelCase = 12 _lowerCamelCase = 13 _lowerCamelCase = 14 @dataclass class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = 42 class a__ : _lowerCamelCase = SCHEDULER_CONFIG_NAME _lowerCamelCase = [] _lowerCamelCase = True @classmethod def lowercase ( cls : Tuple, lowerCAmelCase : Dict[str, Any] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Dict=False, **lowerCAmelCase : Optional[Any], ) -> Tuple: lowercase , lowercase , lowercase : List[str] = cls.load_config( pretrained_model_name_or_path=lowerCAmelCase, subfolder=lowerCAmelCase, return_unused_kwargs=lowerCAmelCase, return_commit_hash=lowerCAmelCase, **lowerCAmelCase, ) return cls.from_config(lowerCAmelCase, return_unused_kwargs=lowerCAmelCase, **lowerCAmelCase ) def lowercase ( self : Tuple, lowerCAmelCase : Union[str, os.PathLike], lowerCAmelCase : bool = False, **lowerCAmelCase : Optional[Any] ) -> List[str]: self.save_config(save_directory=lowerCAmelCase, push_to_hub=lowerCAmelCase, **lowerCAmelCase ) @property def lowercase ( self : List[Any] ) -> List[str]: return self._get_compatibles() @classmethod def lowercase ( cls : Union[str, Any] ) -> List[Any]: lowercase : List[Any] = list(set([cls.__name__] + cls._compatibles ) ) lowercase : List[str] = importlib.import_module(__name__.split('.' )[0] ) lowercase : List[str] = [ getattr(lowerCAmelCase, lowerCAmelCase ) for c in compatible_classes_str if hasattr(lowerCAmelCase, lowerCAmelCase ) ] return compatible_classes
255
"""simple docstring""" _UpperCamelCase: Dict = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] _UpperCamelCase: Optional[int] = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] _UpperCamelCase: int = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] _UpperCamelCase: List[str] = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] _UpperCamelCase: Any = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] _UpperCamelCase: str = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] _UpperCamelCase: Optional[Any] = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] _UpperCamelCase: Optional[int] = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
255
1
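The schedules above are plain lists of DDPM timesteps that run from high noise (999) down to 0; a quick sanity check one can run over any of them (the literal list below is the first schedule from above, with the digit-group underscores dropped):

def check_schedule(schedule: list[int]) -> None:
    # Diffusion timestep schedules start at 999 and end at 0.
    assert schedule[0] == 999 and schedule[-1] == 0
    # They must also be strictly decreasing for the sampler to make progress.
    assert all(a > b for a, b in zip(schedule, schedule[1:])), "must be strictly decreasing"

check_schedule([999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
                244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0])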
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replaces curr_string in the lexicon with its two one-bit extensions."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # Code length grew by one bit; left-pad every existing code with a zero.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the bit string using a Lempel-Ziv style growing lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # Flush a trailing partial match, if any.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends the original file length (self-delimiting binary) to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Packs the bit string into bytes (with a stop marker) and writes them out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
362
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
208
0
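A round-trip sketch for the compressor two entries above, using the cleaned-up function names; the file only implements compression (the matching decompressor lives in a separate module), and the size win below is only expected for repetitive input:

import os
import tempfile

# Write a small, highly repetitive binary file, compress it, and compare on-disk sizes.
src = os.path.join(tempfile.mkdtemp(), "source.bin")
dst = src + ".lz"
with open(src, "wb") as f:
    f.write(b"ABABABABABABABAB" * 256)

compress(src, dst)
print(os.path.getsize(src), "->", os.path.getsize(dst))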
"""simple docstring""" from collections.abc import Callable def lowercase__ ( snake_case_ :Callable[[float], float] , snake_case_ :float , snake_case_ :float ): __UpperCAmelCase = a __UpperCAmelCase = b if function(snake_case_ ) == 0: # one of the a or b is a root for the function return a elif function(snake_case_ ) == 0: return b elif ( function(snake_case_ ) * function(snake_case_ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __UpperCAmelCase = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(snake_case_ ) == 0: return mid elif function(snake_case_ ) * function(snake_case_ ) < 0: __UpperCAmelCase = mid else: __UpperCAmelCase = mid __UpperCAmelCase = start + (end - start) / 2.0 return mid def lowercase__ ( snake_case_ :float ): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 10_00)) import doctest doctest.testmod()
332
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : int = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
332
1
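A quick hand-check of the bisection driver above: the cubic x^3 - 2x - 5 (the classic Wallis example) has a single real root near 2.0946:

# Find the root on [1, 1000] and verify the residual is (approximately) zero.
root = bisection(lambda x: x**3 - 2 * x - 5, 1, 1000)
print(round(root, 4))                        # 2.0946
print(abs(root**3 - 2 * root - 5) < 1e-5)    # True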
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'

_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
366
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
307
0
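The pearsonr metric in the row above is a thin wrapper over one scipy call, so its docstring example can be checked directly (assuming scipy is installed; pearsonr is symmetric in its two arguments):

from scipy.stats import pearsonr

r, p = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(r, 2), round(p, 2))  # -0.74 0.15, matching the metric's docstring example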
'''simple docstring'''
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
298
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
274
0
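The CLIPProcessor tests above all exercise one behavior: the processor routes text to the tokenizer, images to the image processor, and merges the two output dicts. A rough usage sketch, assuming network access to the public openai/clip-vit-base-patch32 checkpoint:

import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
# tokenizer keys plus the image processor's pixel_values
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']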
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d_0 = 0.0
        d_1 = 0.0
        for i in range(len(sample)):
            d_0 += math.pow(sample[i] - weights[0][i], 2)
            d_1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d_0 > d_1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float):
        """Update the winning vector."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
365
import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f'check({human_eval["test"][task]["entry_point"]})'
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
223
0
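The self-organizing-map snippet earlier in this row reduces to the competitive-learning update w_j <- w_j + alpha * (x - w_j) applied to the winning row. The step in isolation (a sketch; printed floats may differ in the last digits):

def update_winner(weights, sample, j, alpha=0.5):
    # move only the winning row j toward the sample
    weights[j] = [w + alpha * (s - w) for w, s in zip(weights[j], sample)]
    return weights


weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
print(update_winner(weights, [1, 1, 0, 0], j=0))
# approximately [[0.6, 0.8, 0.25, 0.45], [0.8, 0.4, 0.7, 0.3]]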
"""simple docstring""" import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils ) _lowerCamelCase : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) _lowerCamelCase : Any = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Tuple = f''' {self.test_dir}/xla_spawn.py --num_cores 8 {self.test_file_path} '''.split() _lowerCamelCase : List[Any] = [sys.executable] + distributed_args execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
72
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , 
len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : 
Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : 
List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) _lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] 
, __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, 
-4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
1
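The MaskFormer integration tests above follow one inference pattern, sketched here assuming the facebook/maskformer-swin-small-coco checkpoint and the standard COCO test image are downloadable:

import requests
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# (1, num_queries, num_labels + 1) and (1, num_queries, H // 4, W // 4)
print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)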
"""simple docstring""" from __future__ import annotations lowerCAmelCase_ = 1.6021E-19 # units = C def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> tuple[str, float]: if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
302
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig lowerCAmelCase_ = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring lowerCAmelCase_ = 'UperNetConfig' class __A ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Union[int, Tuple[int, int]] ,_snake_case : Union[int, Tuple[int, int], str] = 0 ,_snake_case : bool = False ,_snake_case : Union[int, Tuple[int, int]] = 1 ,) -> None: """simple docstring""" super().__init__() lowercase__ : Optional[int] = nn.Convad( in_channels=_snake_case ,out_channels=_snake_case ,kernel_size=_snake_case ,padding=_snake_case ,bias=_snake_case ,dilation=_snake_case ,) lowercase__ : Tuple = nn.BatchNormad(_snake_case ) lowercase__ : List[str] = nn.ReLU() def UpperCAmelCase ( self : str ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : Union[str, Any] = self.conv(_snake_case ) lowercase__ : List[str] = self.batch_norm(_snake_case ) lowercase__ : Tuple = self.activation(_snake_case ) return output class __A ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ,_snake_case : int ) -> None: """simple docstring""" super().__init__() lowercase__ : List[Any] = [ nn.AdaptiveAvgPoolad(_snake_case ), UperNetConvModule(_snake_case ,_snake_case ,kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(_snake_case ) ,_snake_case ) def UpperCAmelCase ( self : Dict ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : Any = input for layer in self.layers: lowercase__ : int = layer(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,_snake_case : Tuple[int, ...] ,_snake_case : int ,_snake_case : int ,_snake_case : bool ) -> None: """simple docstring""" super().__init__() lowercase__ : int = pool_scales lowercase__ : Dict = align_corners lowercase__ : Optional[Any] = in_channels lowercase__ : Optional[Any] = channels lowercase__ : int = [] for i, pool_scale in enumerate(_snake_case ): lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_snake_case ,in_channels=_snake_case ,channels=_snake_case ) self.blocks.append(_snake_case ) self.add_module(str(_snake_case ) ,_snake_case ) def UpperCAmelCase ( self : Any ,_snake_case : torch.Tensor ) -> List[torch.Tensor]: """simple docstring""" lowercase__ : int = [] for ppm in self.blocks: lowercase__ : Any = ppm(_snake_case ) lowercase__ : int = nn.functional.interpolate( _snake_case ,size=x.size()[2:] ,mode='''bilinear''' ,align_corners=self.align_corners ) ppm_outs.append(_snake_case ) return ppm_outs class __A ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ) -> str: """simple docstring""" super().__init__() lowercase__ : str = config lowercase__ : Optional[Any] = config.pool_scales # e.g. 
(1, 2, 3, 6) lowercase__ : Optional[Any] = in_channels lowercase__ : Any = config.hidden_size lowercase__ : Optional[Any] = False lowercase__ : Optional[int] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 ) # PSP Module lowercase__ : Dict = UperNetPyramidPoolingModule( self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,) lowercase__ : str = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,) # FPN Module lowercase__ : Any = nn.ModuleList() lowercase__ : Union[str, Any] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer lowercase__ : List[Any] = UperNetConvModule(_snake_case ,self.channels ,kernel_size=1 ) lowercase__ : Optional[int] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 ) self.lateral_convs.append(_snake_case ) self.fpn_convs.append(_snake_case ) lowercase__ : int = UperNetConvModule( len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.apply(self._init_weights ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> List[str]: """simple docstring""" if isinstance(_snake_case ,nn.Convad ): module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> str: """simple docstring""" lowercase__ : Dict = inputs[-1] lowercase__ : Optional[int] = [x] psp_outs.extend(self.psp_modules(_snake_case ) ) lowercase__ : Optional[Any] = torch.cat(_snake_case ,dim=1 ) lowercase__ : List[str] = self.bottleneck(_snake_case ) return output def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(_snake_case ) ) # build top-down path lowercase__ : List[Any] = len(_snake_case ) for i in range(used_backbone_levels - 1 ,0 ,-1 ): lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:] lowercase__ : int = laterals[i - 1] + nn.functional.interpolate( laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners ) # build outputs lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 ,0 ,-1 ): lowercase__ : Any = nn.functional.interpolate( fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners ) lowercase__ : Any = torch.cat(_snake_case ,dim=1 ) lowercase__ : Any = self.fpn_bottleneck(_snake_case ) lowercase__ : str = self.classifier(_snake_case ) return output class __A ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None: """simple docstring""" super().__init__() lowercase__ : int = config lowercase__ : Dict = config.auxiliary_in_channels lowercase__ : Optional[int] = config.auxiliary_channels lowercase__ : List[Any] = config.auxiliary_num_convs lowercase__ : List[Any] = config.auxiliary_concat_input lowercase__ : str = in_index lowercase__ : Any = (kernel_size // 2) * dilation lowercase__ : Optional[Any] = [] convs.append( 
UperNetConvModule( self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) ) if self.num_convs == 0: lowercase__ : List[str] = nn.Identity() else: lowercase__ : Dict = nn.Sequential(*_snake_case ) if self.concat_input: lowercase__ : int = UperNetConvModule( self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 ) lowercase__ : List[str] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 ) def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" self.apply(self._init_weights ) def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict: """simple docstring""" if isinstance(_snake_case ,nn.Convad ): module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor: """simple docstring""" lowercase__ : str = encoder_hidden_states[self.in_index] lowercase__ : List[str] = self.convs(_snake_case ) if self.concat_input: lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) ) lowercase__ : Dict = self.classifier(_snake_case ) return output class __A ( A_ ): '''simple docstring''' lowerCAmelCase : Any = UperNetConfig lowerCAmelCase : str = "pixel_values" lowerCAmelCase : Dict = True def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]: """simple docstring""" if isinstance(_snake_case ,_snake_case ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def UpperCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]: """simple docstring""" if isinstance(_snake_case ,_snake_case ): lowercase__ : List[Any] = value lowerCAmelCase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. 
See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,A_ ,) class __A ( A_ ): '''simple docstring''' def __init__( self : Optional[Any] ,_snake_case : Tuple ) -> int: """simple docstring""" super().__init__(_snake_case ) lowercase__ : int = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) lowercase__ : Any = UperNetHead(_snake_case ,in_channels=self.backbone.channels ) lowercase__ : str = UperNetFCNHead(_snake_case ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ) def UpperCAmelCase ( self : Dict ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,) -> Union[tuple, SemanticSegmenterOutput]: """simple docstring""" lowercase__ : int = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ : Any = output_attentions if output_attentions is not None else self.config.output_attentions lowercase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs( _snake_case ,output_hidden_states=_snake_case ,output_attentions=_snake_case ) lowercase__ : Optional[int] = outputs.feature_maps lowercase__ : Tuple = self.decode_head(_snake_case ) lowercase__ : Optional[int] = nn.functional.interpolate(_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case ) lowercase__ : List[str] = None if self.auxiliary_head is not None: lowercase__ : str = self.auxiliary_head(_snake_case ) lowercase__ : Dict = nn.functional.interpolate( _snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case ) lowercase__ : Any = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss lowercase__ : Union[str, Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case ) lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case ) lowercase__ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: lowercase__ : Tuple = (logits,) + outputs[1:] else: lowercase__ : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
302
1
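The decode head in the sample above follows UPerNet: a pyramid-pooling (PSP) module over the deepest feature map, then an FPN over the lateral features. The following is a minimal sketch of just the pyramid-pooling step it builds, assuming the pool scales (1, 2, 3, 6) from the sample; names are illustrative and the per-scale convolutions of the real module are omitted.

import torch
from torch import nn


def pyramid_pool(features: torch.Tensor, scales=(1, 2, 3, 6)) -> torch.Tensor:
    # features: (batch, channels, height, width) from the deepest backbone stage
    outputs = [features]
    for scale in scales:
        pooled = nn.functional.adaptive_avg_pool2d(features, scale)
        # each pooled map is upsampled back to the input resolution and concatenated
        outputs.append(
            nn.functional.interpolate(pooled, size=features.shape[2:], mode="bilinear", align_corners=False)
        )
    return torch.cat(outputs, dim=1)  # channel count grows to (1 + len(scales)) * channels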
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _A = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def UpperCAmelCase ( self ) -> List[Any]: torch.manual_seed(0 ) _A = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , ) return model @property def UpperCAmelCase ( self ) -> Dict: torch.manual_seed(0 ) _A = AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) _A = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, unet @slow def UpperCAmelCase ( self ) -> Optional[int]: _A = """cpu""" # ensure determinism for the device-dependent torch.Generator _A = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _A = DDPMScheduler() _A = AudioDiffusionPipeline(vqvae=lowerCAmelCase_ , unet=self.dummy_unet , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) _A = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(42 ) _A = pipe(generator=lowerCAmelCase_ , steps=4 ) _A = output.audios[0] _A = output.images[0] _A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(42 ) _A = pipe(generator=lowerCAmelCase_ , steps=4 , return_dict=lowerCAmelCase_ ) _A = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _A = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _A = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10] _A = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _A = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _A = DDIMScheduler() _A = self.dummy_vqvae_and_unet _A = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCAmelCase_ , 
scheduler=lowerCAmelCase_ ) _A = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) np.random.seed(0 ) _A = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(42 ) _A = pipe(raw_audio=lowerCAmelCase_ , generator=lowerCAmelCase_ , start_step=5 , steps=10 ) _A = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _A = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _A = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _A = self.dummy_unet_condition _A = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCAmelCase_ , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) _A = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) np.random.seed(0 ) _A = torch.rand((1, 1, 10) ) _A = pipe(generator=lowerCAmelCase_ , encoding=lowerCAmelCase_ ) _A = output.images[0] _A = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _A = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self ) -> Union[str, Any]: _A = torch_device _A = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) _A = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(42 ) _A = pipe(generator=lowerCAmelCase_ ) _A = output.audios[0] _A = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _A = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _A = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
180
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Expand the frontier level by level, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk parent pointers back from target_vertex to the source."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # no path from G to Foo: raises ValueError
180
1
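The breadth-first search above pops from the front of a plain list, which costs O(n) per pop; collections.deque gives O(1) pops with the same traversal order. A minimal sketch of the same loop over an adjacency dict (names illustrative):

from collections import deque


def bfs_parents(graph: dict[str, list[str]], source: str) -> dict[str, str | None]:
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    return parent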
# The original bound every parameter set to one reused throwaway name, so each
# assignment clobbered the previous one; the names below are restored from the
# diffusers pipeline-test conventions and are a best-effort reconstruction.
TEXT_TO_IMAGE_PARAMS = frozenset(
    ["prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs"]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(["image", "height", "width", "guidance_scale"])
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    ["prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds"]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image", "mask_image", "height", "width", "guidance_scale",
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(["example_image", "image", "mask_image", "height", "width", "guidance_scale"])
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    ["prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs"]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
358
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu a =[ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ) -> Optional[int]: __lowerCamelCase : int = True while ask_again: __lowerCamelCase : Dict = input(lowerCamelCase__ ) try: if default is not None and len(lowerCamelCase__ ) == 0: return default return convert_value(lowerCamelCase__ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=[] , lowerCamelCase__=None , lowerCamelCase__=0 ) -> str: __lowerCamelCase : Union[str, Any] = BulletMenu(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : Tuple = menu.run(default_choice=lowerCamelCase__ ) return convert_value(lowerCamelCase__ ) if convert_value is not None else result def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: __lowerCamelCase : List[str] = int(lowerCamelCase__ ) return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: __lowerCamelCase : Union[str, Any] = int(lowerCamelCase__ ) return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]: __lowerCamelCase : Optional[Any] = int(lowerCamelCase__ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int: __lowerCamelCase : Union[str, Any] = int(lowerCamelCase__ ) return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: __lowerCamelCase : Optional[Any] = int(lowerCamelCase__ ) return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: return {"yes": True, "no": False}[value.lower()] class A_ ( argparse.RawDescriptionHelpFormatter ): def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : int = super()._format_usage(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = usage.replace('<command> [<args>] ' ,'') return usage
113
0
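The conversion helpers in the config script above all follow one pattern: cast the raw menu input to an int, index an ordered list of names, and wrap the result in an enum. A self-contained sketch of that pattern (the enum here is illustrative, not accelerate's own):

from enum import Enum


class Precision(Enum):
    NO = "no"
    FP16 = "fp16"
    BF16 = "bf16"


def convert_precision(value: str) -> Precision:
    # mirror the sample: cast the raw input, then index an ordered list of values
    index = int(value)
    return Precision(["no", "fp16", "bf16"][index])


assert convert_precision("1") is Precision.FP16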
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline lowercase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class A__ ( __UpperCAmelCase ): """simple docstring""" def __init__( self , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' super().__init__() self.register_modules(unet=lowercase , scheduler=lowercase) @torch.no_grad() def __call__( self , lowercase = 1 , lowercase = 100 , lowercase = None , lowercase = None , lowercase = True , ) -> Union[AudioPipelineOutput, Tuple]: '''simple docstring''' if audio_length_in_s is None: a__ : Optional[int] = self.unet.config.sample_size / self.unet.config.sample_rate a__ : int = audio_length_in_s * self.unet.config.sample_rate a__ : Union[str, Any] = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( F'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to' F' {3 * down_scale_factor / self.unet.config.sample_rate}.') a__ : str = int(lowercase) if sample_size % down_scale_factor != 0: a__ : List[str] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled' F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising' ' process.') a__ : List[Any] = int(lowercase) a__ : int = next(iter(self.unet.parameters())).dtype a__ : Tuple = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase , lowercase) and len(lowercase) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.') a__ : Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=self.device , dtype=lowercase) # set step values self.scheduler.set_timesteps(lowercase , device=audio.device) a__ : Union[str, Any] = self.scheduler.timesteps.to(lowercase) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output a__ : Dict = self.unet(lowercase , lowercase).sample # 2. compute previous image: x_t -> t_t-1 a__ : Any = self.scheduler.step(lowercase , lowercase , lowercase).prev_sample a__ : str = audio.clamp(-1 , 1).float().cpu().numpy() a__ : List[Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase)
99
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] inclusive, from two prefix lookups."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
41
0
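A short usage sketch for the prefix-sum class above (using the PrefixSum name as restored): range sums come from two lookups, and contains_sum checks for a contiguous subarray with a given total.

ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10  # whole array
assert ps.get_sum(1, 2) == 5   # 2 + 3
assert ps.contains_sum(7)      # subarray [3, 4]
assert not ps.contains_sum(100)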
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND: output 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
368
# NOTE: the original identifiers were obfuscated; the function names below are
# descriptive restorations. The constant 0.0821 is R in L*atm/(mol*K).
def normality(moles: int, volume: float, nfactor: float) -> int:
    """Gram-equivalent concentration: molarity (moles / volume) times the n-factor."""
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> int:
    """Ideal gas law solved for pressure: P = nRT / V."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> int:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(moles: float, volume: float, pressure: float) -> int:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
264
0
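As a worked check of the ideal-gas helpers above (P = nRT/V with R ≈ 0.0821 L·atm·mol⁻¹·K⁻¹): 1 mol at 300 K in 1 L gives ≈ 24.63 atm, which the rounded helper reports as 25.

moles, kelvin, volume = 1, 300, 1.0
pressure = round((moles * 0.0821 * kelvin) / volume)  # 24.63 rounds to 25
assert pressure == 25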
import re


def dna(dna: str) -> str:
    """Return the complementary strand: A<->T and C<->G."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
245
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio between the given velocity and the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x-axis for the four-vector (ct, x, y, z)."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
283
0
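A quick numeric check of the Lorentz factor defined above: at half the speed of light, beta = 0.5 and gamma = 1/sqrt(1 - 0.25) ≈ 1.1547.

from math import isclose, sqrt

c = 299_792_458
v = 0.5 * c
beta_val = v / c
gamma_val = 1 / sqrt(1 - beta_val**2)
assert isclose(gamma_val, 1.1547, rel_tol=1e-4)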
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = LayoutLMTokenizer __UpperCamelCase = LayoutLMTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE_ : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) def _SCREAMING_SNAKE_CASE ( self : Dict , **lowercase_ : List[str]): '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = '''UNwant\u00E9d,running''' SCREAMING_SNAKE_CASE_ : Any = '''unwanted, running''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class(self.vocab_file) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize('''UNwant\u00E9d,running''') self.assertListEqual(lowercase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [7, 4, 5, 10, 8, 9]) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass
355
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = dataset SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies def __iter__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = start_length SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) SCREAMING_SNAKE_CASE_ : Tuple = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(lowercase_) def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a ) # last string should be "" return "".join(string_list[:-2] ) def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__a ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1] SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a ) # each task is generated batch_size times SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes( __a , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy() for 
task, generated_tokens in zip(__a , __a ): gen_token_dict[task].append(__a ) SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) code_gens[task].append(remove_last_block(__a ) ) return code_gens def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE_ : str = '''false''' if args.num_workers is None: SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE_ : Tuple = Accelerator() set_seed(args.seed , device_specific=__a ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE_ : List[str] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' ) SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' ) SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a ) SCREAMING_SNAKE_CASE_ : List[Any] = complete_code( __a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE_ : int = [] for task in tqdm(range(__a ) ): SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test'''] SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute( references=__a , predictions=__a , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(__a , __a ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
318
0
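The evaluation script above stops generation once every decoded completion contains one of the EOF strings. A minimal sketch of a custom transformers StoppingCriteria with the same shape as the sample's EndOfFunctionCriteria (the tokenizer and stop strings are assumed to be supplied by the caller):

from transformers import StoppingCriteria


class StopOnStrings(StoppingCriteria):
    def __init__(self, start_length, stop_strings, tokenizer):
        self.start_length = start_length
        self.stop_strings = stop_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # decode only the newly generated part of every sequence in the batch
        decoded = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        return all(any(s in text for s in self.stop_strings) for text in decoded)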
"""simple docstring""" from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake UpperCamelCase_ = numpy.array([0, 0]) UpperCamelCase_ = numpy.array([0.5, 0.8_66_02_54]) UpperCamelCase_ = numpy.array([1, 0]) UpperCamelCase_ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->list[numpy.ndarray]: """simple docstring""" a_ = initial_vectors for _ in range(__lowercase ): a_ = iteration_step(__lowercase ) return vectors def UpperCamelCase ( UpperCAmelCase ) ->list[numpy.ndarray]: """simple docstring""" a_ = [] for i, start_vector in enumerate(vectors[:-1] ): a_ = vectors[i + 1] new_vectors.append(__lowercase ) a_ = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->numpy.ndarray: """simple docstring""" a_ = numpy.radians(__lowercase ) a_ , a_ = numpy.cos(__lowercase ), numpy.sin(__lowercase ) a_ = numpy.array(((c, -s), (s, c)) ) return numpy.dot(__lowercase , __lowercase ) def UpperCamelCase ( UpperCAmelCase ) ->None: """simple docstring""" a_ = plt.gca() axes.set_aspect("equal" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() a_ , a_ = zip(*__lowercase ) plt.plot(__lowercase , __lowercase ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase_ = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
243
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' def A ( self : List[str] ): '''simple docstring''' _snake_case = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) ) self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) ) class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ): '''simple docstring''' _snake_case = parent _snake_case = batch_size _snake_case = num_channels _snake_case = image_size _snake_case = depth_multiplier _snake_case = depth_divisible_by _snake_case = min_depth _snake_case = expand_ratio _snake_case = tf_padding _snake_case = output_stride _snake_case = first_layer_is_expansion _snake_case = finegrained_output _snake_case = hidden_act _snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) _snake_case = classifier_dropout_prob _snake_case = use_labels _snake_case = is_training _snake_case = num_labels _snake_case = initializer_range _snake_case = scope def A ( self : Union[str, Any] ): '''simple docstring''' _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels, pixel_labels def A ( self : str ): '''simple docstring''' return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ): '''simple docstring''' 
_snake_case = MobileNetVaModel(config=lowercase ) model.to(lowercase ) model.eval() _snake_case = model(lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ): '''simple docstring''' _snake_case = self.num_labels _snake_case = MobileNetVaForImageClassification(lowercase ) model.to(lowercase ) model.eval() _snake_case = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ): '''simple docstring''' _snake_case = self.num_labels _snake_case = MobileNetVaForSemanticSegmentation(lowercase ) model.to(lowercase ) model.eval() _snake_case = model(lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _snake_case = model(lowercase , labels=lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A ( self : str ): '''simple docstring''' _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ): '''simple docstring''' _UpperCAmelCase : str = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) _UpperCAmelCase : str = ( { "feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification, "image-segmentation": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : Dict = False _UpperCAmelCase : Union[str, Any] = False def A ( self : Any ): '''simple docstring''' _snake_case = MobileNetVaModelTester(self ) _snake_case = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase ) def A ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' ) def A ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' ) def A ( self : int ): '''simple docstring''' pass @unittest.skip(reason='MobileNetV2 does not output attentions' ) def A ( self : Any ): '''simple docstring''' pass def A ( self : Optional[int] ): '''simple docstring''' _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowercase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def A ( self : List[str] ): '''simple docstring''' _snake_case = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def A ( self : List[Any] ): '''simple docstring''' def check_hidden_states_output(lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : str ): _snake_case = model_class(lowercase ) model.to(lowercase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(lowercase , lowercase ) ) _snake_case = outputs.hidden_states _snake_case = 16 self.assertEqual(len(lowercase ) , lowercase ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(lowercase , lowercase , lowercase ) def A ( self : Tuple ): '''simple docstring''' _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) def A ( self : Dict ): '''simple docstring''' _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowercase ) @slow def A ( self : List[Any] ): '''simple docstring''' for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = MobileNetVaModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def a_ ( ) -> Union[str, Any]: _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @cached_property def A ( self : Optional[Any] ): '''simple docstring''' return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None ) @slow def A ( self : List[Any] ): '''simple docstring''' _snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase ) # forward pass with torch.no_grad(): _snake_case = model(**lowercase ) # verify the logits _snake_case = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , lowercase ) _snake_case = torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) ) @slow def A ( self : Dict ): '''simple docstring''' _snake_case = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) _snake_case = model.to(lowercase ) _snake_case = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) _snake_case = prepare_img() _snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase ) # forward pass with torch.no_grad(): _snake_case = model(**lowercase ) _snake_case = outputs.logits # verify the logits _snake_case = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , lowercase ) _snake_case = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , 
lowercase , atol=1E-4 ) )
282
0
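Each Koch iteration replaces every segment with four shorter ones, so n steps from the initial triangle's 3 segments yield 3·4ⁿ segments and 3·4ⁿ + 1 points in the open polyline used above; for the 5 steps in the demo that is 3073 vectors.

def koch_point_count(steps: int, initial_segments: int = 3) -> int:
    # points = segments + 1 for an open polyline; segments quadruple per step
    return initial_segments * 4**steps + 1


assert koch_point_count(5) == 3073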
"""simple docstring""" _lowerCAmelCase :Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowerCAmelCase :List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowerCAmelCase :Union[str, Any] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
68
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json', 'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json', 'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json', 'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json', 'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json', 'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json', 'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json', 'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json', 'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json', 'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''xlm''' a__ ={ '''hidden_size''': '''emb_dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', '''n_words''': '''vocab_size''', # For backward compatibility } def __init__( self , A=3_0_1_4_5 , A=2_0_4_8 , A=1_2 , A=1_6 , A=0.1 , A=0.1 , A=True , A=False , A=False , A=False , A=1 , A=True , A=5_1_2 , A=2_0_4_8**-0.5 , A=1E-12 , A=0.02 , A=0 , A=1 , A=2 , A=3 , A=5 , A=True , A="first" , A=True , A=None , A=True , A=0.1 , A=5 , A=5 , A=0 , A=0 , A=2 , A=0 , **A , ) -> Tuple: _UpperCAmelCase : Dict = vocab_size _UpperCAmelCase : Tuple = emb_dim _UpperCAmelCase : Optional[Any] = n_layers _UpperCAmelCase : Optional[Any] = n_heads _UpperCAmelCase : Dict = dropout _UpperCAmelCase : int = attention_dropout _UpperCAmelCase : Optional[Any] = gelu_activation _UpperCAmelCase : str = sinusoidal_embeddings _UpperCAmelCase : Any = causal _UpperCAmelCase : Optional[int] = asm _UpperCAmelCase : List[str] = n_langs _UpperCAmelCase : int = use_lang_emb _UpperCAmelCase : Any = layer_norm_eps _UpperCAmelCase : Any = bos_index _UpperCAmelCase : Optional[Any] = eos_index _UpperCAmelCase : List[str] = pad_index _UpperCAmelCase : Optional[int] = unk_index _UpperCAmelCase : Dict = mask_index _UpperCAmelCase : Any = is_encoder _UpperCAmelCase : Optional[Any] = max_position_embeddings _UpperCAmelCase : List[Any] = embed_init_std _UpperCAmelCase : Union[str, Any] = init_std _UpperCAmelCase : List[str] = summary_type _UpperCAmelCase : Dict = summary_use_proj _UpperCAmelCase : str = summary_activation _UpperCAmelCase : Union[str, Any] = summary_proj_to_labels _UpperCAmelCase : Tuple = summary_first_dropout _UpperCAmelCase : List[str] = start_n_top _UpperCAmelCase : Tuple = end_n_top _UpperCAmelCase : List[str] = mask_token_id _UpperCAmelCase : Optional[int] = lang_id if "n_words" in kwargs: _UpperCAmelCase : Tuple = kwargs['''n_words'''] super().__init__(pad_token_id=A , bos_token_id=A , **A ) class _UpperCAmelCase ( a ): '''simple docstring''' @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), 
('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
68
1
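A short usage sketch for the configuration class above: constructing it with a couple of overrides is enough to see the attribute mapping at work, since emb_dim surfaces as hidden_size and n_heads as num_attention_heads.

from transformers import XLMConfig

config = XLMConfig(emb_dim=1024, n_heads=8)
assert config.hidden_size == 1024        # mapped through the attribute map
assert config.num_attention_heads == 8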
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


# test name restored descriptively; the original identifier was obfuscated
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
11
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) by filling one row of Pascal's triangle in place."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
19
0
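The dynamic-programming routine above should agree with the standard library; a quick check against math.comb, using the binomial_coefficient name as restored in the sample:

from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252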
"""simple docstring""" import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int=5 ): '''simple docstring''' assert masked_input.count('''<mask>''' ) == 1 _UpperCAmelCase = torch.tensor(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ).unsqueeze(0 ) # Batch size 1 _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )[0] # The last hidden-state is the first element of the output tuple _UpperCAmelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() _UpperCAmelCase = logits[0, masked_index, :] _UpperCAmelCase = logits.softmax(dim=0 ) _UpperCAmelCase , _UpperCAmelCase = prob.topk(k=_SCREAMING_SNAKE_CASE , dim=0 ) _UpperCAmelCase = ''' '''.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_SCREAMING_SNAKE_CASE ) )] ) _UpperCAmelCase = tokenizer.mask_token _UpperCAmelCase = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ): _UpperCAmelCase = predicted_token_bpe.replace('''\u2581''' , ''' ''' ) if " {0}".format(_SCREAMING_SNAKE_CASE ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(''' {0}'''.format(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), values[index].item(), predicted_token, ) ) return topk_filled_outputs __A : List[Any] = CamembertTokenizer.from_pretrained("camembert-base") __A : Optional[int] = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() __A : Optional[int] = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
326
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __A : List[Any] = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
1
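The fill-mask helper above ranks vocabulary logits at the mask position with softmax followed by topk. The core of that ranking, on dummy logits (values are illustrative, not model outputs):

import torch

logits = torch.tensor([2.0, 0.5, 1.0])   # scores for a 3-word vocabulary
probs = logits.softmax(dim=0)
values, indices = probs.topk(k=2, dim=0)
assert indices.tolist() == [0, 2]        # best candidates, highest probability first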
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def _A () -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = { '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } SCREAMING_SNAKE_CASE_ : Dict = Dataset.from_dict(UpperCamelCase__ ) return dataset class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = get_dataset() SCREAMING_SNAKE_CASE_ : int = make_duplicate_clusters(lowercase_ , 0.85) self.assertEqual(len(duplicate_clusters[0]) , 2) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = get_dataset() SCREAMING_SNAKE_CASE_ : Tuple = deduplicate_dataset(lowercase_) self.assertEqual(len(lowercase_) , 2) print(lowercase_) self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , lowercase_)
91
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : str = pipeline( task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' ) _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = pipeline( task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , ) # This is an audio of a dog _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ] , ) _UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) _UpperCAmelCase : Tuple = audio_classifier( [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> int: pass
263
0
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time snake_case : Union[str, Any] = Lock() def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] ): global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__lowerCAmelCase ) process_lock.release() # receive your right neighbor's value process_lock.acquire() a__ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left a__ = min(__lowerCAmelCase , __lowerCAmelCase ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__lowerCAmelCase ) process_lock.release() # receive your left neighbor's value process_lock.acquire() a__ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right a__ = max(__lowerCAmelCase , __lowerCAmelCase ) # after all swaps are performed, send the values back to main result_pipe[1].send(__lowerCAmelCase ) def __lowercase ( __lowerCAmelCase : List[str] ): a__ = [] a__ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop a__ = Pipe() a__ = Pipe() process_array_.append( Process( target=__lowerCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) a__ = temp_rs a__ = temp_rr for i in range(1 , len(__lowerCAmelCase ) - 1 ): a__ = Pipe() a__ = Pipe() process_array_.append( Process( target=__lowerCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) a__ = temp_rs a__ = temp_rr process_array_.append( Process( target=__lowerCAmelCase , args=( len(__lowerCAmelCase ) - 1, arr[len(__lowerCAmelCase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__lowerCAmelCase ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__lowerCAmelCase ) ): a__ = result_pipe[p][0].recv() process_array_[p].join() return arr def __lowercase ( ): a__ = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*__lowerCAmelCase ) a__ = odd_even_transposition(__lowerCAmelCase ) print('Sorted List\n' ) print(*__lowerCAmelCase ) if __name__ == "__main__": main()
109
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def __lowercase ( __lowerCAmelCase : dict[int, list[int]] ):
    a__ = 0
    a__ = len(__lowerCAmelCase )  # No of vertices in graph
    a__ = [0] * n
    a__ = [False] * n

    def dfs(__lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
        a__ = True
        a__ = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , id_ )
                a__ = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                a__ = min(low[at] , low[to] )

    a__ = []
    for i in range(__lowerCAmelCase ):
        if not visited[i]:
            dfs(__lowerCAmelCase , -1 , __lowerCAmelCase , id_ )
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
109
1
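Both samples in this pair implement classic algorithms. A sequential sketch of odd-even transposition sort, using the same compare-exchange schedule as the multiprocessing version above but without pipes or locks:

def odd_even_transposition_sequential(arr: list) -> list:
    # After n phases every element has reached its sorted position.
    n = len(arr)
    for phase in range(n):
        start = phase % 2  # even phases compare (0,1),(2,3)...; odd phases (1,2),(3,4)...
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))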
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
51
'''simple docstring'''
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Dict:
    __lowerCamelCase : Any = checkpoint
    __lowerCamelCase : List[str] = {}
    __lowerCamelCase : List[str] = vae_state_dict['encoder.conv_in.weight']
    __lowerCamelCase : Dict = vae_state_dict['encoder.conv_in.bias']
    __lowerCamelCase : Optional[Any] = vae_state_dict['encoder.conv_out.weight']
    __lowerCamelCase : Optional[int] = vae_state_dict['encoder.conv_out.bias']
    __lowerCamelCase : Optional[int] = vae_state_dict['encoder.norm_out.weight']
    __lowerCamelCase : Optional[int] = vae_state_dict['encoder.norm_out.bias']
    __lowerCamelCase : Dict = vae_state_dict['decoder.conv_in.weight']
    __lowerCamelCase : List[str] = vae_state_dict['decoder.conv_in.bias']
    __lowerCamelCase : Tuple = vae_state_dict['decoder.conv_out.weight']
    __lowerCamelCase : List[str] = vae_state_dict['decoder.conv_out.bias']
    __lowerCamelCase : Dict = vae_state_dict['decoder.norm_out.weight']
    __lowerCamelCase : Union[str, Any] = vae_state_dict['decoder.norm_out.bias']
    __lowerCamelCase : Any = vae_state_dict['quant_conv.weight']
    __lowerCamelCase : Union[str, Any] = vae_state_dict['quant_conv.bias']
    __lowerCamelCase : Tuple = vae_state_dict['post_quant_conv.weight']
    __lowerCamelCase : List[Any] = vae_state_dict['post_quant_conv.bias']
    # Retrieves the keys for the encoder down blocks only
    __lowerCamelCase : str = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
    __lowerCamelCase : int = {
        layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(_lowerCAmelCase )
    }
    # Retrieves the keys for the decoder up blocks only
    __lowerCamelCase : Dict = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
    __lowerCamelCase : Dict = {
        layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(_lowerCAmelCase )
    }
    for i in range(_lowerCAmelCase ):
        __lowerCamelCase : List[str] = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
        if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            __lowerCamelCase : Any = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.weight' )
            __lowerCamelCase : Any = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.bias' )
        __lowerCamelCase : Optional[int] = renew_vae_resnet_paths(_lowerCAmelCase )
        __lowerCamelCase : Any = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
        assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
    __lowerCamelCase : List[Any] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    __lowerCamelCase : str = 2
    for i in range(1 ,num_mid_res_blocks + 1 ):
        __lowerCamelCase : int = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
        __lowerCamelCase : Optional[Any] = renew_vae_resnet_paths(_lowerCAmelCase )
        __lowerCamelCase : Dict = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
    __lowerCamelCase : Optional[Any] = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    __lowerCamelCase : int = renew_vae_attention_paths(_lowerCAmelCase )
    __lowerCamelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
    conv_attn_to_linear(_lowerCAmelCase )
    for i in range(_lowerCAmelCase ):
        __lowerCamelCase : Optional[Any] = num_up_blocks - 1 - i
        __lowerCamelCase : Any = [
            key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
        ]
        if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            __lowerCamelCase : Optional[int] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.weight'
            ]
            __lowerCamelCase : Optional[Any] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.bias'
            ]
        __lowerCamelCase : Tuple = renew_vae_resnet_paths(_lowerCAmelCase )
        __lowerCamelCase : Any = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
        assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
    __lowerCamelCase : Optional[Any] = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    __lowerCamelCase : Union[str, Any] = 2
    for i in range(1 ,num_mid_res_blocks + 1 ):
        __lowerCamelCase : List[str] = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
        __lowerCamelCase : str = renew_vae_resnet_paths(_lowerCAmelCase )
        __lowerCamelCase : Any = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
    __lowerCamelCase : Any = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    __lowerCamelCase : List[str] = renew_vae_attention_paths(_lowerCAmelCase )
    __lowerCamelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
    conv_attn_to_linear(_lowerCAmelCase )
    return new_checkpoint


def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,) -> List[Any]:
    # Only support V1
    __lowerCamelCase : Any = requests.get(
        ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
    __lowerCamelCase : str = io.BytesIO(r.content )
    __lowerCamelCase : Any = OmegaConf.load(_lowerCAmelCase )
    __lowerCamelCase : List[Any] = 512
    __lowerCamelCase : Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors' ):
        from safetensors import safe_open

        __lowerCamelCase : Optional[Any] = {}
        with safe_open(_lowerCAmelCase ,framework='pt' ,device='cpu' ) as f:
            for key in f.keys():
                __lowerCamelCase : int = f.get_tensor(_lowerCAmelCase )
    else:
        __lowerCamelCase : int = torch.load(_lowerCAmelCase ,map_location=_lowerCAmelCase )['state_dict']
    # Convert the VAE model.
    __lowerCamelCase : int = create_vae_diffusers_config(_lowerCAmelCase ,image_size=_lowerCAmelCase )
    __lowerCamelCase : int = custom_convert_ldm_vae_checkpoint(_lowerCAmelCase ,_lowerCAmelCase )
    __lowerCamelCase : Any = AutoencoderKL(**_lowerCAmelCase )
    vae.load_state_dict(_lowerCAmelCase )
    vae.save_pretrained(_lowerCAmelCase )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    _UpperCamelCase = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
208
0
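The VAE converter in this row is, at its core, a key-renaming pass over a PyTorch state dict. A minimal illustration of that pattern (rename_keys and its inputs are hypothetical helpers, not the diffusers API):

def rename_keys(state_dict: dict, replacements: list) -> dict:
    # Apply every {"old": ..., "new": ...} substitution to each key.
    renamed = {}
    for key, value in state_dict.items():
        for meta in replacements:
            key = key.replace(meta["old"], meta["new"])
        renamed[key] = value
    return renamed

out = rename_keys(
    {"down.0.block.0.conv.weight": 0},
    [{"old": "down.0.block", "new": "down_blocks.0.resnets"}],
)
assert "down_blocks.0.resnets.0.conv.weight" in out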
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

snake_case = logging.get_logger(__name__)

snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

snake_case = {
    """vocab_file""": {
        """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
        """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
        """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
        """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
        """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
        """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
        """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
        """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
        """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
        """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
        """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
        """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
        """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
    },
}

snake_case = {
    """gpt2""": 1_024,
    """gpt2-medium""": 1_024,
    """gpt2-large""": 1_024,
    """gpt2-xl""": 1_024,
    """distilgpt2""": 1_024,
}


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    UpperCamelCase_ : Dict = VOCAB_FILES_NAMES
    UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : Tuple = ['''input_ids''', '''attention_mask''']
    UpperCamelCase_ : str = GPTaTokenizer

    def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int="<|endoftext|>" , UpperCAmelCase_ : int="<|endoftext|>" , UpperCAmelCase_ : Optional[int]="<|endoftext|>" , UpperCAmelCase_ : List[Any]=False , **UpperCAmelCase_ : List[str] , ):
        super().__init__(
            UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
        SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("add_bos_token" , UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , UpperCAmelCase_ ) != add_prefix_space:
            SCREAMING_SNAKE_CASE : Any = getattr(UpperCAmelCase_ , pre_tok_state.pop("type" ) )
            SCREAMING_SNAKE_CASE : Any = add_prefix_space
            SCREAMING_SNAKE_CASE : Optional[Any] = pre_tok_class(**UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = add_prefix_space

    def _A ( self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
        SCREAMING_SNAKE_CASE : str = kwargs.get("is_split_into_words" , UpperCAmelCase_ )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )

    def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
        SCREAMING_SNAKE_CASE : List[Any] = kwargs.get("is_split_into_words" , UpperCAmelCase_ )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )

    def _A ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
        SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
        return tuple(UpperCAmelCase_ )

    def _A ( self : Optional[int] , UpperCAmelCase_ : "Conversation" ):
        SCREAMING_SNAKE_CASE : List[Any] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
        if len(UpperCAmelCase_ ) > self.model_max_length:
            SCREAMING_SNAKE_CASE : int = input_ids[-self.model_max_length :]
        return input_ids
319
def lowerCamelCase__ ( ):
    """simple docstring"""
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]


snake_case = generate_large_matrix()
snake_case = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid )
    assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = 0
    SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2
        SCREAMING_SNAKE_CASE : Optional[int] = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            SCREAMING_SNAKE_CASE : List[Any] = mid + 1
        else:
            SCREAMING_SNAKE_CASE : Dict = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(lowercase )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = 0
    SCREAMING_SNAKE_CASE : List[str] = len(grid[0] )
    for i in range(len(lowercase ) ):
        SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(lowercase ) * len(grid[0] )) - total


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0] )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = 0
    for row in grid:
        for i, number in enumerate(lowercase ):
            if number < 0:
                total += len(lowercase ) - i
                break
    return total


def lowerCamelCase__ ( ):
    """simple docstring"""
    from timeit import timeit

    print("Running benchmarks" )
    SCREAMING_SNAKE_CASE : List[str] = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
319
1
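The second sample in this row counts negatives in a grid whose rows and columns are sorted in decreasing order. A clean sketch of the binary-search strategy it benchmarks, O(rows * log cols) instead of scanning every cell:

def find_negative_index(row):
    # Index of the first negative value in a non-increasing row.
    left, right = 0, len(row) - 1
    if not row or row[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        if row[mid] < 0 and row[mid - 1] >= 0:
            return mid
        if row[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(row)  # no negative numbers in this row

def count_negatives(grid):
    total, bound = 0, len(grid[0])
    for row in grid:
        bound = find_negative_index(row[:bound])  # later rows cannot hold more non-negatives
        total += bound
    return len(grid) * len(grid[0]) - total

assert count_negatives([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8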
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeqaSeqDataset,
    SeqaSeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


snake_case_ : Dict = logging.getLogger(__name__)


class lowercase__ ( lowercase ):
    lowercase__ = """summarization"""
    lowercase__ = ["""loss"""]
    lowercase__ = ROUGE_KEYS
    lowercase__ = """rouge2"""

    def __init__( self : Any ,lowerCamelCase__ : Dict ,**lowerCamelCase__ : Optional[Any] ):
        '''simple docstring'''
        if hparams.sortish_sampler and hparams.gpus > 1:
            _UpperCamelCase : Optional[Any] = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
        super().__init__(lowerCamelCase__ ,num_labels=lowerCamelCase__ ,mode=self.mode ,**lowerCamelCase__ )
        use_task_specific_params(self.model ,'summarization' )
        save_git_info(self.hparams.output_dir )
        _UpperCamelCase : Any = Path(self.output_dir ) / 'metrics.json'
        _UpperCamelCase : int = Path(self.output_dir ) / 'hparams.pkl'
        pickle_save(self.hparams ,self.hparams_save_path )
        _UpperCamelCase : Optional[Any] = 0
        _UpperCamelCase : int = defaultdict(lowerCamelCase__ )
        _UpperCamelCase : Dict = self.config.model_type
        _UpperCamelCase : List[str] = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
        _UpperCamelCase : dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        _UpperCamelCase : Optional[int] = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        _UpperCamelCase : Dict = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        _UpperCamelCase : List[str] = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        _UpperCamelCase : int = get_git_info()['repo_sha']
        _UpperCamelCase : List[str] = hparams.num_workers
        _UpperCamelCase : Dict = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,lowerCamelCase__ ):
            _UpperCamelCase : Optional[Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            _UpperCamelCase : int = self.decoder_start_token_id
        _UpperCamelCase : str = (
            SeqaSeqDataset if hasattr(self.tokenizer ,'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
        )
        _UpperCamelCase : Tuple = False
        _UpperCamelCase : int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            _UpperCamelCase : Any = self.hparams.eval_max_gen_length
        else:
            _UpperCamelCase : List[str] = self.model.config.max_length
        _UpperCamelCase : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Dict[str, torch.Tensor] ):
        '''simple docstring'''
        _UpperCamelCase : Optional[Any] = {
            k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(lowerCamelCase__ ,Path(self.output_dir ) / 'text_batch.json' )
        save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / 'tok_batch.json' )
        _UpperCamelCase : Any = True
        return readable_batch

    def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : str ,**lowerCamelCase__ : int ):
        '''simple docstring'''
        return self.model(lowerCamelCase__ ,**lowerCamelCase__ )

    def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[int] ):
        '''simple docstring'''
        _UpperCamelCase : List[Any] = self.tokenizer.batch_decode(
            lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ,clean_up_tokenization_spaces=lowerCamelCase__ )
        return lmap(str.strip ,lowerCamelCase__ )

    def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : dict ):
        '''simple docstring'''
        _UpperCamelCase : Optional[Any] = self.tokenizer.pad_token_id
        _UpperCamelCase , _UpperCamelCase : Dict = batch['input_ids'], batch['attention_mask']
        _UpperCamelCase : Optional[Any] = batch['labels']
        if isinstance(self.model ,lowerCamelCase__ ):
            _UpperCamelCase : Optional[int] = self.model._shift_right(lowerCamelCase__ )
        else:
            _UpperCamelCase : Union[str, Any] = shift_tokens_right(lowerCamelCase__ ,lowerCamelCase__ )
        if not self.already_saved_batch:
            # This would be slightly better if it only happened on rank zero
            _UpperCamelCase : Dict = decoder_input_ids
            self.save_readable_batch(lowerCamelCase__ )
        _UpperCamelCase : int = self(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,decoder_input_ids=lowerCamelCase__ ,use_cache=lowerCamelCase__ )
        _UpperCamelCase : List[Any] = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            _UpperCamelCase : int = nn.CrossEntropyLoss(ignore_index=lowerCamelCase__ )
            assert lm_logits.shape[-1] == self.vocab_size
            _UpperCamelCase : Optional[int] = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
        else:
            _UpperCamelCase : Optional[Any] = nn.functional.log_softmax(lowerCamelCase__ ,dim=-1 )
            _UpperCamelCase , _UpperCamelCase : Dict = label_smoothed_nll_loss(
                lowerCamelCase__ ,lowerCamelCase__ ,self.hparams.label_smoothing ,ignore_index=lowerCamelCase__ )
        return (loss,)

    @property
    def UpperCamelCase_ ( self : int ):
        '''simple docstring'''
        return self.tokenizer.pad_token_id

    def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ):
        '''simple docstring'''
        _UpperCamelCase : Union[str, Any] = self._step(lowerCamelCase__ )
        _UpperCamelCase : List[Any] = dict(zip(self.loss_names ,lowerCamelCase__ ) )
        # tokens per batch
        _UpperCamelCase : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
        _UpperCamelCase : Any = batch['input_ids'].shape[0]
        _UpperCamelCase : Optional[Any] = batch['input_ids'].eq(self.pad ).sum()
        _UpperCamelCase : Tuple = batch['input_ids'].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ):
        '''simple docstring'''
        return self._generative_step(lowerCamelCase__ )

    def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[Any]="val" ):
        '''simple docstring'''
        self.step_count += 1
        _UpperCamelCase : Any = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        _UpperCamelCase : Optional[int] = losses['loss']
        _UpperCamelCase : Optional[Any] = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
        }
        _UpperCamelCase : Tuple = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        _UpperCamelCase : torch.FloatTensor = torch.tensor(lowerCamelCase__ ).type_as(lowerCamelCase__ )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(lowerCamelCase__ )
        _UpperCamelCase : Dict = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
        _UpperCamelCase : Tuple = self.step_count
        self.metrics[prefix].append(lowerCamelCase__ )  # callback writes this to self.metrics_save_path
        _UpperCamelCase : Optional[int] = flatten_list([x['preds'] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F'{prefix}_loss': loss,
            F'{prefix}_{self.val_metric}': metric_tensor,
        }

    def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int] ):
        '''simple docstring'''
        return calculate_rouge(lowerCamelCase__ ,lowerCamelCase__ )

    def UpperCamelCase_ ( self : str ,lowerCamelCase__ : dict ):
        '''simple docstring'''
        _UpperCamelCase : Any = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        _UpperCamelCase : Any = self.model.generate(
            batch['input_ids'] ,attention_mask=batch['attention_mask'] ,use_cache=lowerCamelCase__ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
        _UpperCamelCase : Tuple = (time.time() - ta) / batch['input_ids'].shape[0]
        _UpperCamelCase : List[str] = self.ids_to_clean_text(lowerCamelCase__ )
        _UpperCamelCase : List[str] = self.ids_to_clean_text(batch['labels'] )
        _UpperCamelCase : List[Any] = self._step(lowerCamelCase__ )
        _UpperCamelCase : int = dict(zip(self.loss_names ,lowerCamelCase__ ) )
        _UpperCamelCase : Dict = self.calc_generative_metrics(lowerCamelCase__ ,lowerCamelCase__ )
        _UpperCamelCase : Dict = np.mean(lmap(lowerCamelCase__ ,lowerCamelCase__ ) )
        base_metrics.update(gen_time=lowerCamelCase__ ,gen_len=lowerCamelCase__ ,preds=lowerCamelCase__ ,target=lowerCamelCase__ ,**lowerCamelCase__ )
        return base_metrics

    def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ):
        '''simple docstring'''
        return self._generative_step(lowerCamelCase__ )

    def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Dict ):
        '''simple docstring'''
        return self.validation_epoch_end(lowerCamelCase__ ,prefix='test' )

    def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ):
        '''simple docstring'''
        _UpperCamelCase : int = self.n_obs[type_path]
        _UpperCamelCase : Optional[int] = self.target_lens[type_path]
        _UpperCamelCase : int = self.dataset_class(
            self.tokenizer ,type_path=lowerCamelCase__ ,n_obs=lowerCamelCase__ ,max_target_length=lowerCamelCase__ ,**self.dataset_kwargs ,)
        return dataset

    def UpperCamelCase_ ( self : int ,lowerCamelCase__ : str ,lowerCamelCase__ : int ,lowerCamelCase__ : bool = False ):
        '''simple docstring'''
        _UpperCamelCase : Any = self.get_dataset(lowerCamelCase__ )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            _UpperCamelCase : List[str] = dataset.make_sortish_sampler(lowerCamelCase__ ,distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCamelCase__ ,num_workers=self.num_workers ,sampler=lowerCamelCase__ ,)
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            _UpperCamelCase : Union[str, Any] = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCamelCase__ ,batch_sampler=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
        else:
            return DataLoader(
                lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCamelCase__ ,num_workers=self.num_workers ,sampler=lowerCamelCase__ ,)

    def UpperCamelCase_ ( self : List[Any] ):
        '''simple docstring'''
        _UpperCamelCase : Optional[Any] = self.get_dataloader('train' ,batch_size=self.hparams.train_batch_size ,shuffle=lowerCamelCase__ )
        return dataloader

    def UpperCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        return self.get_dataloader('val' ,batch_size=self.hparams.eval_batch_size )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        return self.get_dataloader('test' ,batch_size=self.hparams.eval_batch_size )

    @staticmethod
    def UpperCamelCase_ ( lowerCamelCase__ : Any ,lowerCamelCase__ : Tuple ):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(lowerCamelCase__ ,lowerCamelCase__ )
        add_generic_args(lowerCamelCase__ ,lowerCamelCase__ )
        parser.add_argument(
            '--max_source_length' ,default=1024 ,type=lowerCamelCase__ ,help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,)
        parser.add_argument(
            '--max_target_length' ,default=56 ,type=lowerCamelCase__ ,help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,)
        parser.add_argument(
            '--val_max_target_length' ,default=142 ,type=lowerCamelCase__ ,help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,)
        parser.add_argument(
            '--test_max_target_length' ,default=142 ,type=lowerCamelCase__ ,help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) ,)
        parser.add_argument('--freeze_encoder' ,action='store_true' )
        parser.add_argument('--freeze_embeds' ,action='store_true' )
        parser.add_argument('--sortish_sampler' ,action='store_true' ,default=lowerCamelCase__ )
        parser.add_argument('--overwrite_output_dir' ,action='store_true' ,default=lowerCamelCase__ )
        parser.add_argument('--max_tokens_per_batch' ,type=lowerCamelCase__ ,default=lowerCamelCase__ )
        parser.add_argument('--logger_name' ,type=lowerCamelCase__ ,choices=['default', 'wandb', 'wandb_shared'] ,default='default' )
        parser.add_argument('--n_train' ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help='# examples. -1 means use all.' )
        parser.add_argument('--n_val' ,type=lowerCamelCase__ ,default=500 ,required=lowerCamelCase__ ,help='# examples. -1 means use all.' )
        parser.add_argument('--n_test' ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' ,type=lowerCamelCase__ ,default='summarization' ,required=lowerCamelCase__ ,help='# examples. -1 means use all.' )
        parser.add_argument('--label_smoothing' ,type=lowerCamelCase__ ,default=0.0 ,required=lowerCamelCase__ )
        parser.add_argument('--src_lang' ,type=lowerCamelCase__ ,default='' ,required=lowerCamelCase__ )
        parser.add_argument('--tgt_lang' ,type=lowerCamelCase__ ,default='' ,required=lowerCamelCase__ )
        parser.add_argument('--eval_beams' ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,required=lowerCamelCase__ )
        parser.add_argument(
            '--val_metric' ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,required=lowerCamelCase__ ,choices=['bleu', 'rouge2', 'loss', None] )
        parser.add_argument('--eval_max_gen_length' ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,help='never generate more than n tokens' )
        parser.add_argument('--save_top_k' ,type=lowerCamelCase__ ,default=1 ,required=lowerCamelCase__ ,help='How many checkpoints to save' )
        parser.add_argument(
            '--early_stopping_patience' ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will effect it.'
            ) ,)
        return parser


class lowercase__ ( lowercase ):
    lowercase__ = """translation"""
    lowercase__ = ["""loss"""]
    lowercase__ = ["""bleu"""]
    lowercase__ = """bleu"""

    def __init__( self : Tuple ,lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : List[Any] ):
        '''simple docstring'''
        super().__init__(lowerCamelCase__ ,**lowerCamelCase__ )
        _UpperCamelCase : Tuple = hparams.src_lang
        _UpperCamelCase : Tuple = hparams.tgt_lang

    def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ):
        '''simple docstring'''
        return calculate_bleu(lowerCamelCase__ ,lowerCamelCase__ )


def A__ ( UpperCAmelCase_ , UpperCAmelCase_=None ):
    Path(args.output_dir ).mkdir(exist_ok=UpperCAmelCase_ )
    check_output_dir(UpperCAmelCase_ , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            _UpperCamelCase : SummarizationModule = SummarizationModule(UpperCAmelCase_ )
        else:
            _UpperCamelCase : SummarizationModule = TranslationModule(UpperCAmelCase_ )
    _UpperCamelCase : str = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('/tmp' )
        or str(args.output_dir ).startswith('/var' )
    ):
        _UpperCamelCase : Any = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        _UpperCamelCase : Optional[Any] = os.environ.get('WANDB_PROJECT' , UpperCAmelCase_ )
        _UpperCamelCase : Any = WandbLogger(name=model.output_dir.name , project=UpperCAmelCase_ )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        _UpperCamelCase : List[Any] = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
    if args.early_stopping_patience >= 0:
        _UpperCamelCase : List[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        _UpperCamelCase : List[str] = False
    _UpperCamelCase : Optional[Any] = args.val_metric == 'loss'
    _UpperCamelCase : pl.Trainer = generic_train(
        UpperCAmelCase_ ,
        UpperCAmelCase_ ,
        logging_callback=SeqaSeqLoggingCallback() ,
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , UpperCAmelCase_ ) ,
        early_stopping_callback=UpperCAmelCase_ ,
        logger=UpperCAmelCase_ ,
    )
    pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
    if not args.do_predict:
        return model
    _UpperCamelCase : List[str] = ''
    _UpperCamelCase : int = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=UpperCAmelCase_ ) )
    if checkpoints:
        _UpperCamelCase : Optional[Any] = checkpoints[-1]
        _UpperCamelCase : Union[str, Any] = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    snake_case_ : Any = argparse.ArgumentParser()
    snake_case_ : Tuple = pl.Trainer.add_argparse_args(parser)
    snake_case_ : Tuple = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    snake_case_ : Optional[int] = parser.parse_args()
    main(args)
83
import doctest
from collections import deque

import numpy as np


class __SCREAMING_SNAKE_CASE:
    def __init__( self: Dict ) -> None:
        snake_case__ = [2, 1, 2, -1]
        snake_case__ = [1, 2, 3, 4]

    def lowerCAmelCase_ ( self: List[str] ) -> list[float]:
        snake_case__ = len(self.first_signal )
        snake_case__ = len(self.second_signal )
        snake_case__ = max(UpperCamelCase , UpperCamelCase )
        # create a zero matrix of max_length x max_length
        snake_case__ = [[0] * max_length for i in range(UpperCamelCase )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(UpperCamelCase ):
            snake_case__ = deque(self.second_signal )
            rotated_signal.rotate(UpperCamelCase )
            for j, item in enumerate(UpperCamelCase ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        snake_case__ = np.matmul(np.transpose(UpperCamelCase ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(UpperCamelCase , 2 ) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
307
0
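The second sample in this row builds a rotation matrix to compute a circular convolution. The same result follows from the convolution theorem; a compact numpy cross-check of the signals used above:

import numpy as np

def circular_convolution(a, b):
    # Zero-pad both signals to a common length, then multiply in the frequency domain.
    n = max(len(a), len(b))
    a = np.pad(a, (0, n - len(a)))
    b = np.pad(b, (0, n - len(b)))
    return np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b))).round(2)

print(list(circular_convolution([2, 1, 2, -1], [1, 2, 3, 4])))  # [10.0, 10.0, 6.0, 14.0]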
'''simple docstring'''
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        _A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        _A = "xvjiarui/stable-diffusion-2-inpainting"
        _A , _A = FlaxStableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
        _A = "Face of a yellow cat, high resolution, sitting on a park bench"
        _A = jax.random.PRNGKey(0 )
        _A = 50
        _A = jax.device_count()
        _A = num_samples * [prompt]
        _A = num_samples * [init_image]
        _A = num_samples * [mask_image]
        _A , _A , _A = pipeline.prepare_inputs(_lowercase , _lowercase , _lowercase )
        # shard inputs and rng
        _A = replicate(_lowercase )
        _A = jax.random.split(_lowercase , jax.device_count() )
        _A = shard(_lowercase )
        _A = shard(_lowercase )
        _A = shard(_lowercase )
        _A = pipeline(
            _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase )
        _A = output.images.reshape(_lowercase , 512 , 512 , 3 )
        _A = images[0, 253:256, 253:256, -1]
        _A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _A = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
366
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = '''levit'''

    def __init__( self : str , __UpperCAmelCase : int=224 , __UpperCAmelCase : Optional[Any]=3 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : int=16 , __UpperCAmelCase : Any=[128, 256, 384] , __UpperCAmelCase : Optional[Any]=[4, 8, 12] , __UpperCAmelCase : Dict=[4, 4, 4] , __UpperCAmelCase : Union[str, Any]=[16, 16, 16] , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : str=[2, 2, 2] , __UpperCAmelCase : Optional[Any]=[2, 2, 2] , __UpperCAmelCase : int=0.02 , **__UpperCAmelCase : Dict , ):
        '''simple docstring'''
        super().__init__(**__UpperCAmelCase )
        _A = image_size
        _A = num_channels
        _A = kernel_size
        _A = stride
        _A = padding
        _A = hidden_sizes
        _A = num_attention_heads
        _A = depths
        _A = key_dim
        _A = drop_path_rate
        _A = patch_size
        _A = attention_ratio
        _A = mlp_ratio
        _A = initializer_range
        _A = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = version.parse('''1.11''' )

    @property
    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        return 1E-4
174
0
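The Flax test in this row replicates pipeline parameters and shards its inputs across devices before a jitted call. `shard` splits the leading batch axis over the device axis; a numpy sketch of that reshape (illustrative only, not the flax API):

import numpy as np

def shard_like(batch: np.ndarray, num_devices: int) -> np.ndarray:
    # (batch, ...) -> (devices, batch // devices, ...)
    assert batch.shape[0] % num_devices == 0
    return batch.reshape(num_devices, batch.shape[0] // num_devices, *batch.shape[1:])

x = np.zeros((8, 77))  # e.g. 8 tokenized prompts spread across 4 devices
assert shard_like(x, 4).shape == (4, 2, 77)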
UpperCAmelCase_ : Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ : str = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : int ) -> str:
    """simple docstring"""
    assert len(str(__A ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    a_ : List[str] = year // 1_00
    a_ : Optional[int] = (5 * (century % 4) + 2) % 7
    a_ : List[str] = year % 1_00
    a_ : str = centurian % 12
    a_ : List[str] = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    a_ : Any = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    a_ : Any = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, TaTokenizer


def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[str] ,__lowerCamelCase : Dict=True ,__lowerCamelCase : List[Any]="pt" ):
    lowercase_ :Dict = {"add_prefix_space": True} if isinstance(__lowerCamelCase ,__lowerCamelCase ) and not line.startswith(" " ) else {}
    lowercase_ :str = padding_side
    return tokenizer(
        [line] ,max_length=__lowerCamelCase ,padding="max_length" if pad_to_max_length else None ,truncation=__lowerCamelCase ,return_tensors=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,**__lowerCamelCase ,)


def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Dict ,__lowerCamelCase : str=None ,):
    lowercase_ :Optional[int] = input_ids.ne(__lowerCamelCase ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class a_ ( _lowerCAmelCase ):
    def __init__( self : Optional[int] , lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : str="train" , lowercase : Dict=None , lowercase : Tuple=None , lowercase : List[str]=None , lowercase : int="" , ):
        """simple docstring"""
        super().__init__()
        lowercase_ :List[Any] = Path(lowercase ).joinpath(type_path + ".source" )
        lowercase_ :Dict = Path(lowercase ).joinpath(type_path + ".target" )
        lowercase_ :Optional[int] = self.get_char_lens(self.src_file )
        lowercase_ :List[str] = max_source_length
        lowercase_ :str = max_target_length
        assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
        lowercase_ :int = tokenizer
        lowercase_ :Dict = prefix
        if n_obs is not None:
            lowercase_ :Union[str, Any] = self.src_lens[:n_obs]
        lowercase_ :Optional[int] = src_lang
        lowercase_ :str = tgt_lang

    def __len__( self : Tuple ):
        """simple docstring"""
        return len(self.src_lens )

    def __getitem__( self : str , lowercase : Dict ):
        """simple docstring"""
        lowercase_ :Tuple = index + 1  # linecache starts at 1
        lowercase_ :Optional[Any] = self.prefix + linecache.getline(str(self.src_file ) , lowercase ).rstrip("\n" )
        lowercase_ :List[str] = linecache.getline(str(self.tgt_file ) , lowercase ).rstrip("\n" )
        assert source_line, F'empty source line for index {index}'
        assert tgt_line, F'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , lowercase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        lowercase_ :List[str] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase ) else self.tokenizer
        )
        lowercase_ :int = self.tokenizer.generator if isinstance(self.tokenizer , lowercase ) else self.tokenizer
        lowercase_ :List[str] = encode_line(lowercase , lowercase , self.max_source_length , "right" )
        lowercase_ :Any = encode_line(lowercase , lowercase , self.max_target_length , "right" )
        lowercase_ :Dict = source_inputs["input_ids"].squeeze()
        lowercase_ :Tuple = target_inputs["input_ids"].squeeze()
        lowercase_ :Optional[int] = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def lowercase__ ( lowercase : Union[str, Any] ):
        """simple docstring"""
        return [len(lowercase ) for x in Path(lowercase ).open().readlines()]

    def lowercase__ ( self : str , lowercase : List[Any] ):
        """simple docstring"""
        lowercase_ :Optional[int] = torch.stack([x["input_ids"] for x in batch] )
        lowercase_ :Dict = torch.stack([x["attention_mask"] for x in batch] )
        lowercase_ :List[str] = torch.stack([x["decoder_input_ids"] for x in batch] )
        lowercase_ :Any = (
            self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowercase ) else self.tokenizer.pad_token_id
        )
        lowercase_ :str = (
            self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowercase ) else self.tokenizer.pad_token_id
        )
        lowercase_ :Union[str, Any] = trim_batch(lowercase , lowercase )
        lowercase_ , lowercase_ :Optional[Any] = trim_batch(lowercase , lowercase , attention_mask=lowercase )
        lowercase_ :Tuple = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


lowerCAmelCase : List[str] = getLogger(__name__)


def UpperCAmelCase_ ( __lowerCamelCase : List[List] ):
    return list(itertools.chain.from_iterable(__lowerCamelCase ) )


def UpperCAmelCase_ ( __lowerCamelCase : str ):
    lowercase_ :List[str] = get_git_info()
    save_json(__lowerCamelCase ,os.path.join(__lowerCamelCase ,"git_log.json" ) )


def UpperCAmelCase_ ( __lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[Any]=4 ,**__lowerCamelCase : List[str] ):
    with open(__lowerCamelCase ,"w" ) as f:
        json.dump(__lowerCamelCase ,__lowerCamelCase ,indent=__lowerCamelCase ,**__lowerCamelCase )


def UpperCAmelCase_ ( __lowerCamelCase : Tuple ):
    with open(__lowerCamelCase ) as f:
        return json.load(__lowerCamelCase )


def UpperCAmelCase_ ( ):
    lowercase_ :Dict = git.Repo(search_parent_directories=__lowerCamelCase )
    lowercase_ :List[str] = {
        "repo_id": str(__lowerCamelCase ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos


def UpperCAmelCase_ ( __lowerCamelCase : Callable ,__lowerCamelCase : Iterable ):
    return list(map(__lowerCamelCase ,__lowerCamelCase ) )


def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : List[str] ):
    with open(__lowerCamelCase ,"wb" ) as f:
        return pickle.dump(__lowerCamelCase ,__lowerCamelCase )


def UpperCAmelCase_ ( __lowerCamelCase : str ):
    def remove_articles(__lowerCamelCase : Optional[int] ):
        return re.sub(r"\b(a|an|the)\b" ," " ,__lowerCamelCase )

    def white_space_fix(__lowerCamelCase : Dict ):
        return " ".join(text.split() )

    def remove_punc(__lowerCamelCase : Optional[Any] ):
        lowercase_ :Any = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(__lowerCamelCase : List[Any] ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )


def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[int] ):
    lowercase_ :Tuple = normalize_answer(__lowerCamelCase ).split()
    lowercase_ :Dict = normalize_answer(__lowerCamelCase ).split()
    lowercase_ :Tuple = Counter(__lowerCamelCase ) & Counter(__lowerCamelCase )
    lowercase_ :Tuple = sum(common.values() )
    if num_same == 0:
        return 0
    lowercase_ :Union[str, Any] = 1.0 * num_same / len(__lowerCamelCase )
    lowercase_ :List[Any] = 1.0 * num_same / len(__lowerCamelCase )
    lowercase_ :Tuple = (2 * precision * recall) / (precision + recall)
    return fa


def UpperCAmelCase_ ( __lowerCamelCase : int ,__lowerCamelCase : Union[str, Any] ):
    return normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase )


def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : List[str] ):
    assert len(__lowerCamelCase ) == len(__lowerCamelCase )
    lowercase_ :Any = 0
    for hypo, pred in zip(__lowerCamelCase ,__lowerCamelCase ):
        em += exact_match_score(__lowerCamelCase ,__lowerCamelCase )
    if len(__lowerCamelCase ) > 0:
        em /= len(__lowerCamelCase )
    return {"em": em}


def UpperCAmelCase_ ( __lowerCamelCase : str ):
    return model_prefix.startswith("rag" )


def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : int ):
    lowercase_ :Dict = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    lowercase_ :List[str] = "dropout_rate"
    for p in extra_params:
        if getattr(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ):
            if not hasattr(__lowerCamelCase ,__lowerCamelCase ) and not hasattr(__lowerCamelCase ,equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(__lowerCamelCase ) )
                delattr(__lowerCamelCase ,__lowerCamelCase )
                continue
            lowercase_ :List[Any] = p if hasattr(__lowerCamelCase ,__lowerCamelCase ) else equivalent_param[p]
            setattr(__lowerCamelCase ,__lowerCamelCase ,getattr(__lowerCamelCase ,__lowerCamelCase ) )
            delattr(__lowerCamelCase ,__lowerCamelCase )
    return hparams, config
223
0
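The utility module in this row includes SQuAD-style answer scoring. A de-obfuscated sketch of its token-level F1 kernel (the text normalization step is omitted here for brevity):

from collections import Counter

def token_f1(prediction: str, gold: str) -> float:
    # Overlap of token multisets, combined into an F1 score.
    pred_tokens, gold_tokens = prediction.split(), gold.split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

assert round(token_f1("a b c", "a b d"), 2) == 0.67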
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[Any] =StableDiffusionPanoramaPipeline UpperCamelCase__ : int =TEXT_TO_IMAGE_PARAMS UpperCamelCase__ : List[str] =TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase__ : Optional[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase__ : List[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS def A__ ( self : Dict ): torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) lowercase__ = DDIMScheduler() torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) lowercase__ = CLIPTextModel(lowerCamelCase_ ) lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def A__ ( self : Tuple, __lowercase : int, __lowercase : Any=0 ): lowercase__ = torch.manual_seed(lowerCamelCase_ ) lowercase__ = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. 
"height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def A__ ( self : Any ): lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionPanoramaPipeline(**lowerCamelCase_ ) lowercase__ = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowercase__ = sd_pipe(**lowerCamelCase_ ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self : Optional[int] ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def A__ ( self : int ): super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.2_5e-3 ) def A__ ( self : List[str] ): lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionPanoramaPipeline(**lowerCamelCase_ ) lowercase__ = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowercase__ = "french fries" lowercase__ = sd_pipe(**lowerCamelCase_, negative_prompt=lowerCamelCase_ ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self : Optional[Any] ): lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionPanoramaPipeline(**lowerCamelCase_ ) lowercase__ = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowercase__ = sd_pipe(**lowerCamelCase_, view_batch_size=2 ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self : List[str] ): lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) lowercase__ = StableDiffusionPanoramaPipeline(**lowerCamelCase_ ) lowercase__ = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowercase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowercase__ = sd_pipe(**lowerCamelCase_ ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self : int ): lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=lowerCamelCase_ ) lowercase__ = 
StableDiffusionPanoramaPipeline(**components)  # the (truncated) preceding line assigns this to `sd_pipe`
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


# `slow`, `require_torch_gpu` and `torch_device` are expected to come from
# diffusers.utils.testing_utils, imported in the (truncated) header of this sample.
@slow
@require_torch_gpu
class _snake_case(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_ddim(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
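# --- Usage sketch (not part of the test suite) -------------------------------
# A minimal way to run the panorama pipeline outside the test harness, distilled
# from the tests above. The checkpoint id, scheduler choice, and prompt are taken
# from this sample; the device string and seed are illustrative assumptions.
#
#     import torch
#     from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
#
#     model_ckpt = "stabilityai/stable-diffusion-2-base"
#     scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#         model_ckpt, scheduler=scheduler, safety_checker=None
#     ).to("cuda")
#     image = pipe("a photo of the dolomites", generator=torch.manual_seed(0)).images[0]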
355
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and prefix the sign and the ``0b`` marker."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
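# Usage sketch for the converter above (function names as defined in this sample):
# binary_recursive returns the bare bit string, while main validates the input
# and adds the sign and the "0b" prefix.
assert binary_recursive(17) == "10001"  # 17 = 16 + 1
assert main("17") == "0b10001"
assert main("-2") == "-0b10"  # sign is preserved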
224
0
import faiss  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import requests  # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year      = {2021}
}
"""

_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).

This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer.
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer.
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve.
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text.
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references)  # doctest: +SKIP
    >>> print(out.mauve)  # doctest: +SKIP
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
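# Usage sketch (mirrors the docstring example above; requires the `mauve-text`
# package and downloads a GPT-2 featurizer on first use, so it is left commented):
#
#     import datasets
#     mauve = datasets.load_metric("mauve")
#     out = mauve.compute(
#         predictions=["hello there", "general kenobi"],
#         references=["hello there", "general kenobi"],
#         featurize_model_name="gpt2",  # smaller featurizer; "gpt2-large" is the default
#         device_id=-1,  # CPU
#     )
#     print(out.mauve)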
302
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class SCREAMING_SNAKE_CASE(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
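# Usage sketch: instantiating the config above and reading off the derived input
# width of the model. The keyword values are illustrative assumptions, not the
# defaults of any released checkpoint; left commented since this module uses
# relative imports and cannot be run standalone.
#
#     config = SCREAMING_SNAKE_CASE(prediction_length=24, context_length=48, num_time_features=2)
#     # feature_size = input_size * len(lags_sequence) + _number_of_features
#     #              = 1 * 7 + (0 + 0 + 2 + 0 + 1 * 2) = 11
#     config.feature_size  # == 11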
302
1
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
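# Usage sketch: the shim is a drop-in replacement, so existing SageMaker training
# scripts keep working (with a FutureWarning); new code should construct
# transformers.Trainer directly with the same arguments, e.g.:
#
#     trainer = SageMakerTrainer(model=model, args=training_args, train_dataset=train_ds)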
135
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # append the vector to the matrix to build the augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit the unique minimal-degree polynomial through (1, y_1), ..., (n, y_n)."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The degree-10 generating polynomial from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials OP(1, n) ... OP(order, n)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
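# Worked example mirroring the statement of Project Euler problem 101: the
# optimum polynomial through the first three cubes 1, 8, 27 is 6n^2 - 11n + 6,
# and its first incorrect term appears at n = 4: OP(3, 4) = 58, while 4**3 = 64.
cube_op3 = interpolate([1, 8, 27])
assert [cube_op3(n) for n in range(1, 5)] == [1, 8, 27, 58]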
135
1