Dataset schema (five columns per row):

  column                   type    observed range
  code                     string  length 86 to 54.5k
  code_codestyle           int64   0 to 371
  style_context            string  length 87 to 49.2k
  style_context_codestyle  int64   0 to 349
  label                    int64   0 to 1

Each row below appears in that order: the code string, its codestyle id, the
style_context string, its codestyle id, and the binary label.
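The dump does not name the dataset, but rows with this schema can be inspected with the `datasets` library. A minimal sketch, assuming a hypothetical hub id `user/codestyle-pairs` (every name here is illustrative, nothing in the dump confirms it):

from datasets import load_dataset

# Hypothetical hub id -- the dump above does not name the dataset.
ds = load_dataset("user/codestyle-pairs", split="train")

print(ds.features)  # should mirror the five-column schema above
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])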
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '▁' lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'} lowerCAmelCase_ = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } lowerCAmelCase_ = { 'facebook/xglm-564M': 2_048, } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : int = ["input_ids", "attention_mask"] def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None: """simple docstring""" lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase__ : Any = 7 lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) lowercase__ : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : Optional[int] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase__ : List[str] = len(self.sp_model ) lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_snake_case ) lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ) -> Optional[int]: """simple docstring""" lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : Optional[int] = None lowercase__ : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any: """simple docstring""" lowercase__ : int = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): lowercase__ : Dict = {} lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase__ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) if token_ids_a is None: return [1] + ([0] * len(_snake_case )) return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : List[Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_snake_case ,out_type=_snake_case ) def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]: """simple docstring""" 
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip() return out_string def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Any = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case ,'''wb''' ) as fi: lowercase__ : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
code_codestyle: 16
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration _SCREAMING_SNAKE_CASE = 50_00_00 _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = os.path.split(__file__) _SCREAMING_SNAKE_CASE = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def SCREAMING_SNAKE_CASE__ ( __a , **__a ): snake_case_ : int = dataset.map(**__a ) @get_duration def SCREAMING_SNAKE_CASE__ ( __a , **__a ): snake_case_ : Dict = dataset.filter(**__a ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : Tuple = {'num examples': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ : Dict = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} ) snake_case_ : List[Any] = generate_example_dataset( os.path.join(__a , 'dataset.arrow' ) , __a , num_examples=__a ) snake_case_ : str = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__a ) def tokenize(__a ): return tokenizer(examples['text'] ) snake_case_ : Any = map(__a ) snake_case_ : Tuple = map(__a , batched=__a ) snake_case_ : str = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='numpy' ): snake_case_ : Optional[int] = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='pandas' ): snake_case_ : str = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='torch' , columns='numbers' ): snake_case_ : int = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='tensorflow' , columns='numbers' ): snake_case_ : List[Any] = map(__a , function=lambda __a : None , batched=__a ) snake_case_ : int = map(__a , function=__a , batched=__a ) snake_case_ : Optional[Any] = filter(__a ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(__a , 'wb' ) as f: f.write(json.dumps(__a ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
style_context_codestyle: 327
label: 0
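Row 1's code carries the only substantial documentation in this dump: the comment block describing how the fairseq vocabulary is aligned with the SentencePiece one (four pinned control tokens, everything else shifted by an offset of 1, spm id 0 meaning unknown). A standalone sketch of that mapping, with a toy vocabulary standing in for a real SentencePiece model:

FAIRSEQ_OFFSET = 1
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

def token_to_id(token, spm_piece_to_id):
    """spm_piece_to_id stands in for sp_model.PieceToId; it returns 0 for
    pieces the model does not know."""
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = spm_piece_to_id(token)
    # SentencePiece uses 0 for <unk>, so a 0 result maps to fairseq's <unk> id.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]

# Toy vocabulary: "," sits at spm position 3 and fairseq position 4,
# exactly as in the alignment table quoted in the tokenizer above.
toy_vocab = {",": 3, ".": 4, "▁": 5}
assert token_to_id(",", lambda piece: toy_vocab.get(piece, 0)) == 4
assert token_to_id("<pad>", lambda piece: toy_vocab.get(piece, 0)) == 1
assert token_to_id("xyz", lambda piece: toy_vocab.get(piece, 0)) == 3  # unknown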
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = None lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = True lowerCAmelCase_ = None lowerCAmelCase_ = 1 lowerCAmelCase_ = None lowerCAmelCase_ = False lowerCAmelCase_ = None lowerCAmelCase_ = None def lowercase (self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(UpperCAmelCase ) for k, v in self.__dict__.items()} )
code_codestyle: 270
'''simple docstring''' def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if not (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )): raise ValueError("""longest_common_substring() takes two strings for inputs""" ) _snake_case = len(_SCREAMING_SNAKE_CASE ) _snake_case = len(_SCREAMING_SNAKE_CASE ) _snake_case = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] _snake_case = 0 _snake_case = 0 for i in range(1 , texta_length + 1 ): for j in range(1 , texta_length + 1 ): if texta[i - 1] == texta[j - 1]: _snake_case = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: _snake_case = i _snake_case = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 270
label: 1
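The identifier mangling in row 2's second snippet collapses the two text lengths into one name, which obscures the dynamic programme. A readable rendering of the same longest-common-substring algorithm:

def longest_common_substring(text1: str, text2: str) -> str:
    """dp[i][j] is the length of the common suffix of text1[:i] and text2[:j]."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    m, n = len(text1), len(text2)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    best_len = best_end = 0
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    # Slice the answer out of text1 using the recorded end position.
    return text1[best_end - best_len : best_end]

assert longest_common_substring("abcdef", "xcdeyz") == "cde"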
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCamelCase_ (snake_case__ ): '''simple docstring''' def __init__( self : List[Any] ): _UpperCAmelCase : Union[str, Any] = [] def _A ( self : Any , A : Union[str, Any] , A : Optional[int] , A : List[str] , **A : Tuple ): self.events.append("on_init_end" ) def _A ( self : Any , A : str , A : List[Any] , A : List[Any] , **A : Tuple ): self.events.append("on_train_begin" ) def _A ( self : Tuple , A : List[str] , A : Tuple , A : int , **A : List[str] ): self.events.append("on_train_end" ) def _A ( self : Optional[Any] , A : Dict , A : Any , A : Optional[Any] , **A : List[Any] ): self.events.append("on_epoch_begin" ) def _A ( self : Optional[Any] , A : List[Any] , A : List[str] , A : Optional[int] , **A : Optional[int] ): self.events.append("on_epoch_end" ) def _A ( self : List[str] , A : Optional[int] , A : List[Any] , A : Union[str, Any] , **A : Any ): self.events.append("on_step_begin" ) def _A ( self : Tuple , A : Union[str, Any] , A : int , A : Optional[int] , **A : int ): self.events.append("on_step_end" ) def _A ( self : Optional[int] , A : Optional[Any] , A : Union[str, Any] , A : str , **A : Union[str, Any] ): self.events.append("on_evaluate" ) def _A ( self : Optional[Any] , A : Optional[int] , A : Dict , A : List[Any] , **A : Dict ): self.events.append("on_predict" ) def _A ( self : Dict , A : Dict , A : List[Any] , A : Dict , **A : str ): self.events.append("on_save" ) def _A ( self : Tuple , A : Optional[Any] , A : Union[str, Any] , A : Optional[int] , **A : Dict ): self.events.append("on_log" ) def _A ( self : Optional[int] , A : Optional[Any] , A : Tuple , A : Tuple , **A : List[str] ): self.events.append("on_prediction_step" ) @require_torch class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' def _A ( self : Optional[int] ): _UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() def _A ( self : List[Any] ): shutil.rmtree(self.output_dir ) def _A ( self : Union[str, Any] , A : Optional[int]=0 , A : Optional[Any]=0 , A : Optional[Any]=64 , A : Dict=64 , A : Any=None , A : Tuple=False , **A : Optional[int] ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
_UpperCAmelCase : str = RegressionDataset(length=A ) _UpperCAmelCase : Union[str, Any] = RegressionDataset(length=A ) _UpperCAmelCase : Any = RegressionModelConfig(a=A , b=A ) _UpperCAmelCase : List[Any] = RegressionPreTrainedModel(A ) _UpperCAmelCase : Dict = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A ) return Trainer( A , A , train_dataset=A , eval_dataset=A , callbacks=A , ) def _A ( self : str , A : List[str] , A : List[str] ): self.assertEqual(len(A ) , len(A ) ) # Order doesn't matter _UpperCAmelCase : Tuple = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) _UpperCAmelCase : Any = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) for cba, cba in zip(A , A ): if isinstance(A , A ) and isinstance(A , A ): self.assertEqual(A , A ) elif isinstance(A , A ) and not isinstance(A , A ): self.assertEqual(A , cba.__class__ ) elif not isinstance(A , A ) and isinstance(A , A ): self.assertEqual(cba.__class__ , A ) else: self.assertEqual(A , A ) def _A ( self : int , A : List[str] ): _UpperCAmelCase : List[str] = ["on_init_end", "on_train_begin"] _UpperCAmelCase : str = 0 _UpperCAmelCase : Optional[Any] = len(trainer.get_eval_dataloader() ) _UpperCAmelCase : Optional[int] = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(A ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _A ( self : str ): _UpperCAmelCase : Any = self.get_trainer() _UpperCAmelCase : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # Callbacks passed at init are added to the default callbacks _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=A ) _UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def _A ( self : Optional[Any] ): _UpperCAmelCase : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _UpperCAmelCase : Dict = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _UpperCAmelCase : Optional[Any] = self.get_trainer() _UpperCAmelCase : Any = trainer.pop_callback(A ) self.assertEqual(cb.__class__ , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # We can also add, pop, or remove by instance _UpperCAmelCase : Union[str, 
Any] = self.get_trainer() _UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _UpperCAmelCase : List[Any] = self.get_trainer() _UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0] _UpperCAmelCase : Union[str, Any] = trainer.pop_callback(A ) self.assertEqual(A , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def _A ( self : Optional[Any] ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=A ) _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # Independent log/save/eval _UpperCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _UpperCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() _UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() _UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # A bit of everything _UpperCAmelCase : int = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() _UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: _UpperCAmelCase : Optional[Any] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(A ) in warn_mock.call_args[0][0]
code_codestyle: 31
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} # See all BART models at https://huggingface.co/models?filter=bart __snake_case = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, """tokenizer_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""", }, } __snake_case = { """facebook/bart-base""": 10_24, """facebook/bart-large""": 10_24, """facebook/bart-large-mnli""": 10_24, """facebook/bart-large-cnn""": 10_24, """facebook/bart-large-xsum""": 10_24, """yjernite/bart_eli5""": 10_24, } class lowercase__ ( _UpperCAmelCase ): A__ : Tuple =VOCAB_FILES_NAMES A__ : Any =PRETRAINED_VOCAB_FILES_MAP A__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Tuple =["""input_ids""", """attention_mask"""] A__ : Optional[int] =BartTokenizer def __init__( self : str , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : List[Any]="</s>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Any="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=True , **UpperCAmelCase_ : List[Any] , ): 
super().__init__( UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space: SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase_ , pre_tok_state.pop('type' ) ) SCREAMING_SNAKE_CASE__ = add_prefix_space SCREAMING_SNAKE_CASE__ = pre_tok_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` SCREAMING_SNAKE_CASE__ = 'post_processor' SCREAMING_SNAKE_CASE__ = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ ) if tokenizer_component_instance: SCREAMING_SNAKE_CASE__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: SCREAMING_SNAKE_CASE__ = tuple(state['sep'] ) if "cls" in state: SCREAMING_SNAKE_CASE__ = tuple(state['cls'] ) SCREAMING_SNAKE_CASE__ = False if state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space: SCREAMING_SNAKE_CASE__ = add_prefix_space SCREAMING_SNAKE_CASE__ = True if state.get('trim_offsets' , UpperCAmelCase_ ) != trim_offsets: SCREAMING_SNAKE_CASE__ = trim_offsets SCREAMING_SNAKE_CASE__ = True if changes_to_apply: SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase_ , state.pop('type' ) ) SCREAMING_SNAKE_CASE__ = component_class(**UpperCAmelCase_ ) setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ ) @property def A_ ( self : Tuple ): if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def A_ ( self : Any , UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value SCREAMING_SNAKE_CASE__ = value def A_ ( self : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE__ = kwargs.get('is_split_into_words' , UpperCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ ) def A_ ( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE__ = kwargs.get('is_split_into_words' , UpperCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.' 
) return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ ) def A_ ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ ) return tuple(UpperCAmelCase_ ) def A_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A_ ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
style_context_codestyle: 176
label: 0
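Row 3's test callback follows one pattern throughout: every hook appends its own name to a list, so the test can later compare the recorded sequence against the expected one. A minimal deobfuscated sketch of that recorder (only three hooks shown; the original implements all of them):

from transformers import TrainerCallback

class EventRecorderCallback(TrainerCallback):
    """Each hook appends its own name; tests assert on the sequence later."""

    def __init__(self):
        self.events = []

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")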
from ...configuration_utils import PretrainedConfig from ...utils import logging __A =logging.get_logger(__name__) __A ={ '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''', # See all REALM models at https://huggingface.co/models?filter=realm } class _SCREAMING_SNAKE_CASE ( snake_case_ ): lowerCAmelCase__ = 'realm' def __init__( self , lowercase=30522 , lowercase=768 , lowercase=128 , lowercase=12 , lowercase=12 , lowercase=8 , lowercase=3072 , lowercase="gelu_new" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.0_2 , lowercase=1e-12 , lowercase=256 , lowercase=10 , lowercase=1e-3 , lowercase=5 , lowercase=320 , lowercase=13353718 , lowercase=5000 , lowercase=1 , lowercase=0 , lowercase=2 , **lowercase , ) -> str: super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) # Common config lowerCamelCase_ = vocab_size lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = hidden_size lowerCamelCase_ = retriever_proj_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = num_candidates lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = initializer_range lowerCamelCase_ = type_vocab_size lowerCamelCase_ = layer_norm_eps # Reader config lowerCamelCase_ = span_hidden_size lowerCamelCase_ = max_span_width lowerCamelCase_ = reader_layer_norm_eps lowerCamelCase_ = reader_beam_size lowerCamelCase_ = reader_seq_len # Retrieval config lowerCamelCase_ = num_block_records lowerCamelCase_ = searcher_beam_size
code_codestyle: 47
from collections import defaultdict def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = first_str.lower().strip() lowerCamelCase_ = second_str.lower().strip() # Remove whitespace lowerCamelCase_ = first_str.replace(" " , "" ) lowerCamelCase_ = second_str.replace(" " , "" ) # Strings of different lengths are not anagrams if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): return False # Default values for count should be 0 lowerCamelCase_ = defaultdict(lowerCamelCase__ ) # For each character in input strings, # increment count in the corresponding for i in range(len(lowerCamelCase__ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() __A =input('''Enter the first string ''').strip() __A =input('''Enter the second string ''').strip() __A =check_anagrams(input_a, input_b) print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
style_context_codestyle: 47
label: 1
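Row 4's second snippet checks anagrams by incrementing a shared counter for one string and decrementing it for the other; for true anagrams every count cancels to zero. A cleaned-up rendering:

from collections import defaultdict

def check_anagrams(first: str, second: str) -> bool:
    """Increment counts for one string, decrement for the other;
    anagrams leave every count at zero."""
    a = first.lower().strip().replace(" ", "")
    b = second.lower().strip().replace(" ", "")
    if len(a) != len(b):
        return False
    count = defaultdict(int)  # missing keys default to 0
    for x, y in zip(a, b):
        count[x] += 1
        count[y] -= 1
    return all(c == 0 for c in count.values())

assert check_anagrams("Silent", "Listen")
assert not check_anagrams("apple", "paper")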
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class a__ ( datasets.BuilderConfig ): __lowerCAmelCase = None class a__ ( datasets.ArrowBasedBuilder ): __lowerCAmelCase = PandasConfig def __magic_name__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def __magic_name__ ( self , _a ): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowercase : Tuple = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_a , (str, list, tuple) ): lowercase : Tuple = data_files if isinstance(_a , _a ): lowercase : Optional[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowercase : List[Any] = [dl_manager.iter_files(_a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] lowercase : Any = [] for split_name, files in data_files.items(): if isinstance(_a , _a ): lowercase : Tuple = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowercase : Optional[int] = [dl_manager.iter_files(_a ) for file in files] splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={"files": files} ) ) return splits def __magic_name__ ( self , _a ): if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example lowercase : Optional[Any] = table_cast(_a , self.config.features.arrow_schema ) return pa_table def __magic_name__ ( self , _a ): for i, file in enumerate(itertools.chain.from_iterable(_a ) ): with open(_a , "rb" ) as f: lowercase : Any = pa.Table.from_pandas(pd.read_pickle(_a ) ) yield i, self._cast_table(_a )
code_codestyle: 202
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : List[str]=7 ) -> str: lowercase : int = None if token is not None: lowercase : Any = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""} # The id of a workflow (not of a workflow run) lowercase : int = "636036" lowercase : Dict = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" lowercase : int = requests.get(__snake_case , headers=__snake_case ).json() return result["workflow_runs"] def __magic_name__ ( __snake_case : Dict ) -> Tuple: lowercase : Tuple = get_daily_ci_runs(__snake_case ) lowercase : Union[str, Any] = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowercase : List[Any] = workflow_run["id"] break return workflow_run_id def __magic_name__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> int: lowercase : Dict = get_last_daily_ci_runs(__snake_case ) if workflow_run_id is not None: lowercase : Dict = get_artifacts_links(worflow_run_id=__snake_case , token=__snake_case ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowercase : Optional[int] = artifacts_links[artifact_name] download_artifact( artifact_name=__snake_case , artifact_url=__snake_case , output_dir=__snake_case , token=__snake_case ) def __magic_name__ ( __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Tuple ) -> Optional[int]: get_last_daily_ci_artifacts(__snake_case , __snake_case , __snake_case ) lowercase : str = {} for artifact_name in artifact_names: lowercase : Optional[Any] = os.path.join(__snake_case , f"""{artifact_name}.zip""" ) if os.path.isfile(__snake_case ): lowercase : List[Any] = {} with zipfile.ZipFile(__snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(__snake_case ): # read the file with z.open(__snake_case ) as f: lowercase : str = f.read().decode("UTF-8" ) return results
style_context_codestyle: 202
label: 1
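Row 5's builder boils down to one conversion: each pickled DataFrame is read back with pandas and handed to Arrow. A minimal round trip showing that core step (the file name is illustrative):

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "numbers": [1.0, 2.0]})
df.to_pickle("example.pkl")  # illustrative path

# Core of the builder's table-generation loop: pickle in, Arrow table out.
with open("example.pkl", "rb") as f:
    table = pa.Table.from_pandas(pd.read_pickle(f))
print(table.schema)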
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Tuple = logging.get_logger(__name__) UpperCAmelCase_ : Tuple = { "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json", } class UpperCamelCase ( _UpperCAmelCase ): lowerCAmelCase : Optional[Any] = """lxmert""" lowerCAmelCase : Dict = {} def __init__( self , UpperCAmelCase__=30_522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=9_500 , UpperCAmelCase__=1_600 , UpperCAmelCase__=400 , UpperCAmelCase__=3_072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.02 , UpperCAmelCase__=1e-1_2 , UpperCAmelCase__=9 , UpperCAmelCase__=5 , UpperCAmelCase__=5 , UpperCAmelCase__=2_048 , UpperCAmelCase__=4 , UpperCAmelCase__=6.67 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , **UpperCAmelCase__ , ): A__ = vocab_size A__ = hidden_size A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = num_qa_labels A__ = num_object_labels A__ = num_attr_labels A__ = l_layers A__ = x_layers A__ = r_layers A__ = visual_feat_dim A__ = visual_pos_dim A__ = visual_loss_normalizer A__ = task_matched A__ = task_mask_lm A__ = task_obj_predict A__ = task_qa A__ = visual_obj_loss A__ = visual_attr_loss A__ = visual_feat_loss A__ = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**UpperCAmelCase__ )
code_codestyle: 365
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) def UpperCamelCase ( _A : str , _A : str )-> Any: """simple docstring""" A__ = RobertaPreLayerNormConfig.from_pretrained( _A , architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A__ = torch.load(hf_hub_download(repo_id=_A , filename="pytorch_model.bin" ) ) A__ = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("roberta." ): A__ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A__ = tensor_value A__ = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_A , config=_A , state_dict=_A ) model.save_pretrained(_A ) # convert tokenizer A__ = AutoTokenizer.from_pretrained(_A ) tokenizer.save_pretrained(_A ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint-repo", default=None, type=str, required=True, help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ : List[Any] = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
style_context_codestyle: 198
label: 0
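Row 6's conversion script is essentially a key-renaming pass over a PyTorch state_dict: swap the `roberta.` prefix for `roberta_prelayernorm.` and drop weights the target architecture never reads. The pass in isolation, on a toy dict:

def rename_state_dict(original):
    renamed = {}
    for key, value in original.items():
        if key.startswith("roberta."):
            key = "roberta_prelayernorm." + key[len("roberta.") :]
        # Weights the target architecture never reads are skipped outright.
        if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
            continue
        renamed[key] = value
    return renamed

print(rename_state_dict({"roberta.encoder.weight": 1, "lm_head.bias": 2}))
# {'roberta_prelayernorm.encoder.weight': 1, 'lm_head.bias': 2}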
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __a : Any = logging.get_logger(__name__) __a : Union[str, Any] = { """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class _UpperCamelCase ( _UpperCAmelCase ): """simple docstring""" __a : Any = '''deberta-v2''' def __init__( self , lowerCAmelCase__=12_81_00 , lowerCAmelCase__=15_36 , lowerCAmelCase__=24 , lowerCAmelCase__=24 , lowerCAmelCase__=61_44 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-7 , lowerCAmelCase__=False , lowerCAmelCase__=-1 , lowerCAmelCase__=0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=0 , lowerCAmelCase__="gelu" , **lowerCAmelCase__ , ) -> Optional[Any]: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = initializer_range __lowercase = relative_attention __lowercase = max_relative_positions __lowercase = pad_token_id __lowercase = position_biased_input # Backwards compatibility if type(lowerCAmelCase__ ) == str: __lowercase = [x.strip() for x in pos_att_type.lower().split('''|''' )] __lowercase = pos_att_type __lowercase = vocab_size __lowercase = layer_norm_eps __lowercase = kwargs.get('''pooler_hidden_size''' , lowerCAmelCase__ ) __lowercase = pooler_dropout __lowercase = pooler_hidden_act class _UpperCamelCase ( _UpperCAmelCase ): """simple docstring""" @property def _SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowercase = {0: '''batch''', 1: '''sequence'''} if self._config.type_vocab_size > 0: return OrderedDict( [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] ) else: return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] ) @property def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return 12 def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 40 , lowerCAmelCase__ = 40 , lowerCAmelCase__ = None , ) -> Mapping[str, Any]: '''simple docstring''' __lowercase = super().generate_dummy_inputs(preprocessor=lowerCAmelCase__ , framework=lowerCAmelCase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del 
dummy_inputs["token_type_ids"] return dummy_inputs
code_codestyle: 210
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _UpperCamelCase ( _UpperCAmelCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''num_attention_heads''' ) ) class _UpperCamelCase : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=64 , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=16 , lowerCAmelCase__=[1_28, 2_56, 3_84] , lowerCAmelCase__=[4, 6, 8] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=[16, 16, 16] , lowerCAmelCase__=0 , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ) -> Tuple: '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = num_channels __lowercase = kernel_size __lowercase = stride __lowercase = padding __lowercase = hidden_sizes __lowercase = num_attention_heads __lowercase = depths __lowercase = key_dim __lowercase = drop_path_rate __lowercase = patch_size __lowercase = attention_ratio __lowercase = mlp_ratio __lowercase = initializer_range __lowercase = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] __lowercase = is_training __lowercase = use_labels __lowercase = num_labels __lowercase = initializer_range def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.num_labels ) __lowercase = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' __lowercase = 
LevitModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __lowercase = model(lowerCAmelCase__ ) __lowercase = (self.image_size, self.image_size) __lowercase , __lowercase = image_size[0], image_size[1] for _ in range(4 ): __lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) __lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' __lowercase = self.num_labels __lowercase = LevitForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" __a : int = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) __a : List[str] = ( { '''feature-extraction''': LevitModel, '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) __a : int = False __a : Dict = False __a : Optional[Any] = False __a : Optional[int] = False __a : Dict = False def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' __lowercase = LevitModelTester(self ) __lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' return @unittest.skip(reason='''Levit does not use inputs_embeds''' ) def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='''Levit does not support input and output embeddings''' ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' pass @unittest.skip(reason='''Levit does not output attentions''' ) def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(lowerCAmelCase__ ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: 
'''simple docstring''' def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): __lowercase = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __lowercase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __lowercase = outputs.hidden_states __lowercase = len(self.model_tester.depths ) + 1 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __lowercase = (self.model_tester.image_size, self.model_tester.image_size) __lowercase , __lowercase = image_size[0], image_size[1] for _ in range(4 ): __lowercase = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) __lowercase = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str: '''simple docstring''' __lowercase = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' if not self.model_tester.is_training: return __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowerCAmelCase__ ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue __lowercase = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.train() __lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) __lowercase = model(**lowerCAmelCase__ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowercase = False __lowercase = True for model_class in self.all_model_classes: if model_class in get_values(lowerCAmelCase__ ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher 
supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue __lowercase = model_class(lowerCAmelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCAmelCase__ ) model.train() __lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) __lowercase = model(**lowerCAmelCase__ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowerCAmelCase__ ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ): __lowercase = problem_type['''title'''] __lowercase = problem_type['''num_labels'''] __lowercase = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.train() __lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) if problem_type["num_labels"] > 1: __lowercase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) __lowercase = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowerCAmelCase__ ) as warning_list: __lowercase = model(**lowerCAmelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = LevitModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def UpperCAmelCase ( ): """simple docstring""" __lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( lowerCAmelCase__ ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): __lowercase = model(**lowerCAmelCase__ ) # verify the logits __lowercase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __lowercase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
style_context_codestyle: 210
label: 1
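Row 7's model tester repeatedly applies the standard convolution output-size formula, floor((size + 2 * padding - kernel) / stride) + 1, to track the shrinking resolution across four stages. With the tester's defaults (image_size=64, kernel_size=3, stride=2, padding=1) each stage roughly halves the spatial size:

from math import floor

def conv_out(size: int, kernel: int, stride: int, padding: int) -> int:
    """Standard convolution output-size formula used throughout the test above."""
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 64
for _ in range(4):  # the tester applies the formula four times
    size = conv_out(size, kernel=3, stride=2, padding=1)
print(size)  # 4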
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): @property def snake_case__ ( self): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = ort.SessionOptions() _lowerCAmelCase : int = False return options def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png") _lowerCAmelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png") _lowerCAmelCase : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy") # using the PNDM scheduler by default _lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=__a) _lowerCAmelCase : Any = "A red cat sitting on a park bench" _lowerCAmelCase : Optional[Any] = np.random.RandomState(0) _lowerCAmelCase : Any = pipe( prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", ) _lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 1E-2
300
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP _snake_case = False try: _snake_case = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class UpperCAmelCase_ : def __init__( self, __a = None, __a = []): '''simple docstring''' _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : Optional[int] = choices _lowerCAmelCase : Tuple = prompt if sys.platform == "win32": _lowerCAmelCase : Optional[Any] = "*" else: _lowerCAmelCase : Dict = "➔ " def snake_case__ ( self, __a, __a = ""): '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index], 32, __a) else: forceWrite(self.choices[index], __a) def snake_case__ ( self, __a): '''simple docstring''' if index == self.position: forceWrite(f" {self.arrow_char} ") self.write_choice(__a) else: forceWrite(f" {self.choices[index]}") reset_cursor() def snake_case__ ( self, __a, __a = 1): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(__a) move_cursor(__a, direction.name) self.print_choice(self.position) @input.mark(KEYMAP["up"]) def snake_case__ ( self): '''simple docstring''' self.move_direction(Direction.UP) @input.mark(KEYMAP["down"]) def snake_case__ ( self): '''simple docstring''' self.move_direction(Direction.DOWN) @input.mark(KEYMAP["newline"]) def snake_case__ ( self): '''simple docstring''' move_cursor(len(self.choices) - self.position, "DOWN") return self.position @input.mark(KEYMAP["interrupt"]) def snake_case__ ( self): '''simple docstring''' move_cursor(len(self.choices) - self.position, "DOWN") raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(__a)] for number in range(10)]) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : str = int(chr(self.current_selection)) _lowerCAmelCase : List[str] = index - self.position if index == self.position: return if index < len(self.choices): if self.position > index: self.move_direction(Direction.UP, -movement) elif self.position < index: self.move_direction(Direction.DOWN, __a) else: return else: return def snake_case__ ( self, __a = 0): '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt, "\n") if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter", "\n") else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n") _lowerCAmelCase : List[Any] = default_choice for i in range(len(self.choices)): self.print_choice(__a) forceWrite("\n") move_cursor(len(self.choices) - self.position, "UP") with cursor.hide(): while True: if in_colab: try: _lowerCAmelCase : str = int(builtins.input()) except ValueError: _lowerCAmelCase : List[Any] = default_choice else: _lowerCAmelCase : List[str] = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices) + 1): move_cursor(1, "UP") clear_line() self.write_choice(__a, "\n") return choice
300
1
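The pipeline test above configures onnxruntime through a (provider_name, options_dict) tuple and an ort.SessionOptions object. The same setup on its own; the boolean the test disables is unnamed in this dump, so enable_mem_pattern below is an assumption:

import onnxruntime as ort

cuda_provider = (
    "CUDAExecutionProvider",
    {
        "gpu_mem_limit": "15000000000",  # 15GB, as in the test above
        "arena_extend_strategy": "kSameAsRequested",
    },
)

options = ort.SessionOptions()
options.enable_mem_pattern = False  # assumed to be the flag toggled in the test's options property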
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase__ : int = { 'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'], 'tokenization_ctrl': ['CTRLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[str] = [ 'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST', 'CTRLForSequenceClassification', 'CTRLLMHeadModel', 'CTRLModel', 'CTRLPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : int = [ 'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFCTRLForSequenceClassification', 'TFCTRLLMHeadModel', 'TFCTRLModel', 'TFCTRLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
"""simple docstring""" import functools def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = len(lowerCamelCase ) UpperCAmelCase__ = len(lowerCamelCase ) @functools.cache def min_distance(lowerCamelCase , lowerCamelCase ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
98
1
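A quick usage check for the memoized edit-distance sample above ("intention" to "execution" is the classic five-operation example):

# assumes min_distance_up_bottom from the sample above is in scope
print(min_distance_up_bottom("intention", "execution"))  # 5
print(min_distance_up_bottom("", "abc"))                 # 3: insert every character
print(min_distance_up_bottom("abc", "abc"))              # 0: already equal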
from __future__ import annotations


def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(principal, nominal_annual_percentage_rate / 365, number_of_years * 365)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
358
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
211
0
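A spot check of the gen_gaussian_kernel helper above: the 3x3 kernel is symmetric and peaks at its center cell.

import numpy as np

# assumes gen_gaussian_kernel from the sample above is in scope
kernel = gen_gaussian_kernel(3, sigma=1)
print(kernel.shape)                                      # (3, 3)
print(np.unravel_index(kernel.argmax(), kernel.shape))   # (1, 1): the center
print(np.allclose(kernel, kernel.T))                     # True: radially symmetric weights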
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Any: UpperCAmelCase : Optional[int] = 10 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = [1, 2, 3, 4] UpperCAmelCase : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] UpperCAmelCase : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] UpperCAmelCase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" UpperCAmelCase , UpperCAmelCase : List[str] = process_story(A ) self.assertEqual(A , [] ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = """""" UpperCAmelCase , UpperCAmelCase : Any = process_story(A ) self.assertEqual(A , [] ) self.assertEqual(A , [] ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Any = ( """It was the year of Our Lord one thousand seven hundred and """ """seventy-five\n\nSpiritual revelations were conceded to England """ """at that favoured period, as at this.\n@highlight\n\nIt was the best of times""" ) UpperCAmelCase , UpperCAmelCase : Optional[int] = process_story(A ) UpperCAmelCase : Union[str, Any] = [ """It was the year of Our Lord one thousand seven hundred and seventy-five.""", """Spiritual revelations were conceded to England at that favoured period, as at this.""", ] self.assertEqual(A , A ) UpperCAmelCase : Optional[Any] = ["""It was the best of times."""] self.assertEqual(A , A ) def _lowercase( self ) -> Dict: UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3, 4] ) UpperCAmelCase : List[str] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(A , 0 ).numpy() , expected.numpy() ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) UpperCAmelCase : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(A , 23 ).numpy() , expected.numpy() ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) UpperCAmelCase : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(A , 1 ).numpy() , expected.numpy() ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Any = 101 UpperCAmelCase : Any = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) UpperCAmelCase : List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) UpperCAmelCase : Optional[int] = compute_token_type_ids(A , A ) np.testing.assert_array_equal(A , A )
265
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
1
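The assertions in the first sample of this row fully pin down truncate_or_pad's contract: cut the sequence to block_size, then right-pad with the given id. A minimal implementation consistent with those assertions (a sketch, not necessarily the repository's exact code):

def truncate_or_pad(sequence, block_size, pad_token_id):
    # Cut to block_size, then right-pad with pad_token_id up to block_size.
    sequence = sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))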
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE :Optional[int] = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :List[str] = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :int = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Tuple = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Dict = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( 
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
350
'''simple docstring'''


def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]  # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
156
0
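Usage of the odd-even (brick) sort above; it sorts in place and returns the same list:

# assumes odd_even_sort from the sample above is in scope
data = [5, 1, 4, 2, 8, 0]
print(odd_even_sort(data))  # [0, 1, 2, 4, 5, 8]
print(data)                 # same list object, now sorted in place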
'''simple docstring''' import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = logging.get_logger() # the current default level is logging.WARNING _A = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(__snake_case ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = logging.get_verbosity() _A = logging.get_logger("transformers.models.bart.tokenization_bart" ) _A = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(__snake_case ) as cl: logger.warning(__snake_case ) self.assertEqual(cl.out , msg + "\n" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(__snake_case ) as cl: logger.warning(__snake_case ) self.assertEqual(cl.out , "" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(__snake_case ) as cl: logger.warning(__snake_case ) self.assertEqual(cl.out , msg + "\n" ) # restore to the original level logging.set_verbosity(__snake_case ) @mockenv(TRANSFORMERS_VERBOSITY="error" ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var _A = logging.get_logger("transformers.models.bart.tokenization_bart" ) _A = os.getenv("TRANSFORMERS_VERBOSITY" , __snake_case ) _A = logging.log_levels[env_level_str] _A = logging.get_verbosity() self.assertEqual( __snake_case , __snake_case , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , ) # restore to the original level _A = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="super-error" ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() _A = logging.logging.getLogger() with CaptureLogger(__snake_case ) as cl: # this action activates the env var logging.get_logger("transformers.models.bart.tokenization_bart" ) self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out ) # no need to restore as nothing was changed def lowerCAmelCase ( self : Dict ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() _A = logging.get_logger("transformers.models.bart.tokenization_bart" ) _A = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ): # nothing should be logged as env var disables this method with CaptureLogger(__snake_case ) as cl: logger.warning_advice(__snake_case ) self.assertEqual(cl.out , "" ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(__snake_case ) as cl: logger.warning_advice(__snake_case ) self.assertEqual(cl.out , msg + "\n" ) def __lowercase ( ) -> Optional[Any]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
79
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCAmelCase : _lowercase: List[str] _lowercase: Optional[str] = None # Automatically constructed _lowercase: ClassVar[str] = "dict" _lowercase: ClassVar[Any] = None _lowercase: str = field(default='''Translation''' , init=snake_case_ , repr=snake_case_ ) def __call__( self : Optional[int] ) -> Optional[int]: return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowercase__ ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCAmelCase : _lowercase: Optional[List] = None _lowercase: Optional[int] = None _lowercase: Optional[str] = None # Automatically constructed _lowercase: ClassVar[str] = "dict" _lowercase: ClassVar[Any] = None _lowercase: str = field(default='''TranslationVariableLanguages''' , init=snake_case_ , repr=snake_case_ ) def lowercase__ ( self : Any ) -> Optional[Any]: _lowerCAmelCase = sorted(set(self.languages ) ) if self.languages else None _lowerCAmelCase = len(self.languages ) if self.languages else None def __call__( self : List[str] ) -> Optional[Any]: return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowercase__ ( self : Optional[Any] , __snake_case : Tuple ) -> Any: _lowerCAmelCase = set(self.languages ) if self.languages and set(__snake_case ) - lang_set: raise ValueError( f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. _lowerCAmelCase = [] for lang, text in translation_dict.items(): if isinstance(__snake_case , __snake_case ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. _lowerCAmelCase , _lowerCAmelCase = zip(*sorted(__snake_case ) ) return {"language": languages, "translation": translations} def lowercase__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
70
0
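The logging tests above exercise the public verbosity API of transformers; used directly, the same knobs look like this (a sketch covering only the calls the tests themselves use):

from transformers.utils import logging

logging.set_verbosity_error()  # silence warnings from all transformers.* loggers
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("suppressed")   # not shown at error verbosity

logging.set_verbosity_warning()  # restore warnings
logger.warning("shown again")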
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # TODO Update this SCREAMING_SNAKE_CASE__ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __lowerCamelCase ( snake_case_ ): """simple docstring""" lowerCAmelCase__ = "esm" def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1026 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase , mask_token_id=UpperCAmelCase , **UpperCAmelCase ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = position_embedding_type lowercase_ = use_cache lowercase_ = emb_layer_norm_before lowercase_ = token_dropout lowercase_ = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("No esmfold_config supplied for folding model, using default values." ) lowercase_ = EsmFoldConfig() elif isinstance(UpperCAmelCase , UpperCAmelCase ): lowercase_ = EsmFoldConfig(**UpperCAmelCase ) lowercase_ = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) lowercase_ = get_default_vocab_list() else: lowercase_ = vocab_list else: lowercase_ = None lowercase_ = None if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , UpperCAmelCase ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" 
) def A__ ( self ) -> List[str]: '''simple docstring''' lowercase_ = super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase ): lowercase_ = self.esmfold_config.to_dict() return output @dataclass class __lowerCamelCase : """simple docstring""" lowerCAmelCase__ = None lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = 0 lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = 1_28 lowerCAmelCase__ = None def A__ ( self ) -> Any: '''simple docstring''' if self.trunk is None: lowercase_ = TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase ): lowercase_ = TrunkConfig(**self.trunk ) def A__ ( self ) -> Optional[int]: '''simple docstring''' lowercase_ = asdict(self ) lowercase_ = self.trunk.to_dict() return output @dataclass class __lowerCamelCase : """simple docstring""" lowerCAmelCase__ = 48 lowerCAmelCase__ = 10_24 lowerCAmelCase__ = 1_28 lowerCAmelCase__ = 32 lowerCAmelCase__ = 32 lowerCAmelCase__ = 32 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = False lowerCAmelCase__ = 4 lowerCAmelCase__ = 1_28 lowerCAmelCase__ = None def A__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.structure_module is None: lowercase_ = StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase ): lowercase_ = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got" F' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got" F' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) lowercase_ = self.sequence_state_dim // self.sequence_head_width lowercase_ = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.' 
) def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ = asdict(self ) lowercase_ = self.structure_module.to_dict() return output @dataclass class __lowerCamelCase : """simple docstring""" lowerCAmelCase__ = 3_84 lowerCAmelCase__ = 1_28 lowerCAmelCase__ = 16 lowerCAmelCase__ = 1_28 lowerCAmelCase__ = 12 lowerCAmelCase__ = 4 lowerCAmelCase__ = 8 lowerCAmelCase__ = 0.1 lowerCAmelCase__ = 8 lowerCAmelCase__ = 1 lowerCAmelCase__ = 2 lowerCAmelCase__ = 7 lowerCAmelCase__ = 10 lowerCAmelCase__ = 1E-8 lowerCAmelCase__ = 1E5 def A__ ( self ) -> Any: '''simple docstring''' return asdict(self ) def SCREAMING_SNAKE_CASE_ ( ): '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
297
import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ): '''simple docstring''' return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Any="attention" ): '''simple docstring''' lowercase_ = lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) lowercase_ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) lowercase_ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) lowercase_ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) lowercase_ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=False ): '''simple docstring''' if split_mlp_wi: lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] lowercase_ = (wi_a, wi_a) else: lowercase_ = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] lowercase_ = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ): '''simple docstring''' return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: dict , *, __lowerCamelCase: int , __lowerCamelCase: bool , __lowerCamelCase: bool = False ): '''simple docstring''' lowercase_ = traverse_util.flatten_dict(variables["target"] ) lowercase_ = {"/".join(__lowerCamelCase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowercase_ = "encoder/encoder/mlp/wi_0/kernel" in old print("Split MLP:" , __lowerCamelCase ) lowercase_ = collections.OrderedDict() # Shared embeddings. lowercase_ = old["token_embedder/embedding"] # Encoder. for i in range(__lowerCamelCase ): # Block i, layer 0 (Self Attention). lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_attention_layer_norm" ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "attention" ) lowercase_ = layer_norm lowercase_ = k.T lowercase_ = o.T lowercase_ = q.T lowercase_ = v.T # Block i, layer 1 (MLP). 
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_mlp_layer_norm" ) lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , __lowerCamelCase ) lowercase_ = layer_norm if split_mlp_wi: lowercase_ = wi[0].T lowercase_ = wi[1].T else: lowercase_ = wi.T lowercase_ = wo.T if scalable_attention: # convert the rel_embedding of each layer lowercase_ = tax_relpos_bias_lookup( __lowerCamelCase , __lowerCamelCase , "encoder" ).T lowercase_ = old["encoder/encoder_norm/scale"] if not scalable_attention: lowercase_ = tax_relpos_bias_lookup( __lowerCamelCase , 0 , "encoder" ).T lowercase_ = tax_relpos_bias_lookup( __lowerCamelCase , 0 , "decoder" ).T if not is_encoder_only: # Decoder. for i in range(__lowerCamelCase ): # Block i, layer 0 (Self Attention). lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "self_attention" ) lowercase_ = layer_norm lowercase_ = k.T lowercase_ = o.T lowercase_ = q.T lowercase_ = v.T # Block i, layer 1 (Cross Attention). lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "encoder_decoder_attention" ) lowercase_ = layer_norm lowercase_ = k.T lowercase_ = o.T lowercase_ = q.T lowercase_ = v.T # Block i, layer 2 (MLP). lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_mlp_layer_norm" ) lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , __lowerCamelCase ) lowercase_ = layer_norm if split_mlp_wi: lowercase_ = wi[0].T lowercase_ = wi[1].T else: lowercase_ = wi.T lowercase_ = wo.T if scalable_attention: # convert the rel_embedding of each layer lowercase_ = tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" ).T lowercase_ = old["decoder/decoder_norm/scale"] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowercase_ = old["decoder/logits_dense/kernel"].T return new def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: bool ): '''simple docstring''' lowercase_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowercase_ = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowercase_ = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) lowercase_ = state_dict["shared.weight"] return state_dict def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any ): '''simple docstring''' lowercase_ = checkpoints.load_tax_checkpoint(__lowerCamelCase ) lowercase_ = convert_tax_to_pytorch( __lowerCamelCase , num_layers=config.num_layers , is_encoder_only=__lowerCamelCase , scalable_attention=__lowerCamelCase ) lowercase_ = make_state_dict(__lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , ): '''simple docstring''' lowercase_ = MTaConfig.from_json_file(__lowerCamelCase ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowercase_ = UMTaEncoderModel(__lowerCamelCase ) else: lowercase_ = UMTaForConditionalGeneration(__lowerCamelCase ) # Load weights from tf checkpoint load_tax_weights_in_ta(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(__lowerCamelCase ) # Verify that we can load the checkpoint. model.from_pretrained(__lowerCamelCase ) print("Done" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
297
1
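The attention lookups in the conversion script above flatten per-head kernels of shape (d_model, num_heads, head_dim) into 2-D matrices with a single reshape; a tiny numpy illustration with made-up shapes:

import numpy as np

d_model, num_heads, head_dim = 8, 2, 4
k = np.arange(d_model * num_heads * head_dim).reshape(d_model, num_heads, head_dim)

# same flattening as k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
k_2d = k.reshape(k.shape[0], k.shape[1] * k.shape[2])
print(k_2d.shape)  # (8, 8): heads concatenated along the feature axis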
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def snake_case ( self ): debug_launcher(test_script.main ) def snake_case ( self ): debug_launcher(test_ops.main )
57
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 snake_case_ = 42 snake_case_ = None class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 2 @register_to_config def __init__( self : str ,A : float = 0.02 ,A : float = 1_00 ,A : float = 1.0_07 ,A : float = 80 ,A : float = 0.05 ,A : float = 50 ,): # standard deviation of the initial noise distribution __A = sigma_max # setable values __A = None __A = None __A = None # sigma(t_i) def UpperCamelCase_ ( self : str ,A : torch.FloatTensor ,A : Optional[int] = None ): return sample def UpperCamelCase_ ( self : Dict ,A : int ,A : Union[str, torch.device] = None ): __A = num_inference_steps __A = np.arange(0 ,self.num_inference_steps )[::-1].copy() __A = torch.from_numpy(A ).to(A ) __A = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __A = torch.tensor(A ,dtype=torch.floataa ,device=A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : torch.FloatTensor ,A : float ,A : Optional[torch.Generator] = None ): if self.config.s_min <= sigma <= self.config.s_max: __A = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 ) else: __A = 0 # sample eps ~ N(0, S_noise^2 * I) __A = self.config.s_noise * randn_tensor(sample.shape ,generator=A ).to(sample.device ) __A = sigma + gamma * sigma __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase_ ( self : Dict ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_hat + sigma_hat * model_output __A = (sample_hat - pred_original_sample) / sigma_hat __A = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : Optional[int] ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_prev + sigma_prev * model_output __A = (sample_prev - pred_original_sample) / sigma_prev __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : str ): raise NotImplementedError()
15
0
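set_timesteps in the scheduler above derives its noise levels by geometric interpolation between the configured sigma_max and sigma_min; the same schedule computed in isolation with the defaults shown (sigma_min=0.02, sigma_max=100):

import numpy as np

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 50
i = np.arange(num_inference_steps)

# sigma(t_i) as computed in set_timesteps above
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
print(schedule[0], schedule[-1])  # 10000.0 down to 0.0004, i.e. sigma_max**2 to sigma_min**2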
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase__ : Dict = logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Tuple = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : int , **lowerCAmelCase__ : Any ) -> Tuple: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _UpperCamelCase = deprecated_arg[3:] _UpperCamelCase = not kwargs.pop(lowerCAmelCase__ ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) _UpperCamelCase = kwargs.pop('''tpu_name''' , self.tpu_name ) _UpperCamelCase = kwargs.pop('''device_idx''' , self.device_idx ) _UpperCamelCase = kwargs.pop('''eager_mode''' , self.eager_mode ) _UpperCamelCase = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**lowerCAmelCase__ ) _snake_case : str = field( default=__magic_name__ , metadata={'help': 'Name of TPU'} , ) _snake_case : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) _snake_case : bool = field(default=__magic_name__ , metadata={'help': 'Benchmark models in eager model.'} ) _snake_case : bool = field( default=__magic_name__ , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def snake_case__ ( self : Optional[int] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ['''tf'''] ) _UpperCamelCase = None if self.tpu: try: if self.tpu_name: _UpperCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: _UpperCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: _UpperCamelCase = None return tpu @cached_property def snake_case__ ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) _UpperCamelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) _UpperCamelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU _UpperCamelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def snake_case__ ( self : Optional[int] ) -> bool: '''simple docstring''' requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def snake_case__ ( self : Union[str, Any] ) -> "tf.distribute.Strategy": '''simple docstring''' requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def snake_case__ ( self : Any ) -> int: '''simple docstring''' requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' 
requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def snake_case__ ( self : List[Any] ) -> bool: '''simple docstring''' return self.n_gpu > 0
361
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # the beam enters through the gap at (0.0, 10.1)
    incoming_gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, incoming_gradient = next_point(
            point_x, point_y, incoming_gradient
        )
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
287
0
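A quick hand check of the geometry behind the Project Euler 144 solver above: the mirror is the ellipse 4x^2 + y^2 = 100, the beam enters through the gap at (0.0, 10.1), and the first impact point (1.4, -9.6) lies exactly on the ellipse. The initial gradient used by the solver follows from those two points.

from math import isclose

point_x, point_y = 1.4, -9.6
assert isclose(4 * point_x**2 + point_y**2, 100)        # first impact is on the ellipse
incoming_gradient = (10.1 - point_y) / (0.0 - point_x)  # slope of the entering beam
print(incoming_gradient)  # ~ -14.07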
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in the given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
218
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) _lowerCAmelCase : List[Any] = logging.getLogger(__name__) def UpperCamelCase_( _snake_case : Optional[Any] , _snake_case : Any ): """simple docstring""" __a =np.argmax(_snake_case , axis=1 ) return np.sum(outputs == labels ) def UpperCamelCase_( _snake_case : List[str] ): """simple docstring""" with open(_snake_case , encoding='utf_8' ) as f: __a =csv.reader(_snake_case ) __a =[] next(_snake_case ) # skip the first line for line in tqdm(_snake_case ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def UpperCamelCase_( _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Dict , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Tuple ): """simple docstring""" __a =[] for dataset in encoded_datasets: __a =len(_snake_case ) __a =np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) __a =np.zeros((n_batch, 2) , dtype=np.intaa ) __a =np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) __a =np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(_snake_case ): __a =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __a =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __a =with_conta __a =with_conta __a =len(_snake_case ) - 1 __a =len(_snake_case ) - 1 __a =with_conta __a =with_conta __a =mc_label __a =(input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(_snake_case ) for t in all_inputs ) ) return tensor_datasets def UpperCamelCase_( ): """simple docstring""" __a =argparse.ArgumentParser() parser.add_argument('--model_name' , type=_snake_case , default='openai-gpt' , help='pretrained model name' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument('--train_dataset' , type=_snake_case , default='' ) parser.add_argument('--eval_dataset' , type=_snake_case , default='' ) parser.add_argument('--seed' , type=_snake_case , default=42 ) parser.add_argument('--num_train_epochs' , type=_snake_case , default=3 ) parser.add_argument('--train_batch_size' , type=_snake_case , default=8 ) parser.add_argument('--eval_batch_size' , type=_snake_case , default=16 ) parser.add_argument('--adam_epsilon' , default=1e-8 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , type=_snake_case , default=1 ) parser.add_argument( '--max_steps' , default=-1 , type=_snake_case , help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' 
) , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--learning_rate' , type=_snake_case , default=6.2_5e-5 ) parser.add_argument('--warmup_steps' , default=0 , type=_snake_case , help='Linear warmup over warmup_steps.' ) parser.add_argument('--lr_schedule' , type=_snake_case , default='warmup_linear' ) parser.add_argument('--weight_decay' , type=_snake_case , default=0.01 ) parser.add_argument('--lm_coef' , type=_snake_case , default=0.9 ) parser.add_argument('--n_valid' , type=_snake_case , default=374 ) parser.add_argument('--server_ip' , type=_snake_case , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=_snake_case , default='' , help='Can be used for distant debugging.' ) __a =parser.parse_args() print(_snake_case ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __a =torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) __a =torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(_snake_case , _snake_case ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __a =['_start_', '_delimiter_', '_classify_'] __a =OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(_snake_case ) __a =tokenizer.convert_tokens_to_ids(_snake_case ) __a =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(_snake_case ) ) model.to(_snake_case ) # Load and encode the datasets def tokenize_and_encode(_snake_case : int ): if isinstance(_snake_case , _snake_case ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) ) elif isinstance(_snake_case , _snake_case ): return obj return [tokenize_and_encode(_snake_case ) for o in obj] logger.info('Encoding dataset...' 
) __a =load_rocstories_dataset(args.train_dataset ) __a =load_rocstories_dataset(args.eval_dataset ) __a =(train_dataset, eval_dataset) __a =tokenize_and_encode(_snake_case ) # Compute the max input length for the Transformer __a =model.config.n_positions // 2 - 2 __a =max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __a =min(_snake_case , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __a =pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case ) __a , __a =tensor_datasets[0], tensor_datasets[1] __a =TensorDataset(*_snake_case ) __a =RandomSampler(_snake_case ) __a =DataLoader(_snake_case , sampler=_snake_case , batch_size=args.train_batch_size ) __a =TensorDataset(*_snake_case ) __a =SequentialSampler(_snake_case ) __a =DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __a =args.max_steps __a =args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1 else: __a =len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs __a =list(model.named_parameters() ) __a =['bias', 'LayerNorm.bias', 'LayerNorm.weight'] __a =[ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] __a =AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon ) __a =get_linear_schedule_with_warmup( _snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case ) if args.do_train: __a , __a , __a =0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ): __a =0 __a =0 __a =tqdm(_snake_case , desc='Training' ) for step, batch in enumerate(_snake_case ): __a =tuple(t.to(_snake_case ) for t in batch ) __a , __a , __a , __a =batch __a =model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case ) __a =args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __a =( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __a ='Training loss: {:.2e} lr: {:.2e}'.format(_snake_case , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __a =model.module if hasattr(_snake_case , 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __a =os.path.join(args.output_dir , _snake_case ) __a =os.path.join(args.output_dir , _snake_case ) torch.save(model_to_save.state_dict() , _snake_case ) model_to_save.config.to_json_file(_snake_case ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned __a =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __a =OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(_snake_case ) if args.do_eval: model.eval() __a , __a =0, 0 __a , __a =0, 0 for batch in tqdm(_snake_case , desc='Evaluating' ): __a =tuple(t.to(_snake_case ) for t in batch ) __a , __a , __a , __a =batch with torch.no_grad(): __a , __a , __a , __a =model( _snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , 
mc_labels=_snake_case ) __a =mc_logits.detach().cpu().numpy() __a =mc_labels.to('cpu' ).numpy() __a =accuracy(_snake_case , _snake_case ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __a =eval_loss / nb_eval_steps __a =eval_accuracy / nb_eval_examples __a =tr_loss / nb_tr_steps if args.do_train else None __a ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} __a =os.path.join(args.output_dir , 'eval_results.txt' ) with open(_snake_case , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , _snake_case , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
218
1
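An illustration of the per-example layout that pre_process_datasets in the fine-tuning script above assembles for OpenAIGPTDoubleHeadsModel. The token ids 900/901/902 and the tiny story are made up for this sketch, not real vocabulary entries: each story is paired with both candidate endings, and the position of the classification token feeds the multiple-choice head.

# hypothetical special-token ids, standing in for _start_/_delimiter_/_classify_
start, delim, clf = 900, 901, 902
story, ending_a, ending_b = [1, 2, 3], [4, 5], [6, 7]
cand_a = [start] + story + [delim] + ending_a + [clf]
cand_b = [start] + story + [delim] + ending_b + [clf]
mc_token_ids = (len(cand_a) - 1, len(cand_b) - 1)  # index of [clf] in each candidate
mc_label = 0  # ending_a is the true continuation in this toy example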
# Using dfs for finding an Eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# Check whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
367
'''simple docstring''' import datasets from .evaluate import evaluate lowercase ='\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n' lowercase ='\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n' lowercase ='\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')}, 'references': { 'id': datasets.Value('string'), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string'), 'answer_start': datasets.Value('int32'), }), }, }) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , ) def lowerCAmelCase ( self , snake_case , snake_case) -> int: '''simple docstring''' _UpperCAmelCase : Optional[int] ={prediction['id']: prediction['prediction_text'] for prediction in predictions} _UpperCAmelCase : Optional[int] =[ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] _UpperCAmelCase : List[Any] =evaluate(dataset=snake_case , predictions=snake_case) return score
242
0
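The Euler-path checker above rests on the classical degree-parity rule for undirected graphs: zero odd-degree vertices means an Euler circuit exists, exactly two means an Euler path, and anything else means neither. A minimal standalone restatement on the first sample graph:

graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
odd = [v for v, neighbours in graph.items() if len(neighbours) % 2 == 1]
print(len(odd))  # 2 -> this graph has an Euler path but no Euler circuit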
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
259
def triangle_number_generator():
    # Generate the triangle numbers 1, 3, 6, 10, ...
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    # Count divisors via the prime factorization of n.
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    # First triangle number with more than five hundred divisors.
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
259
1
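A worked check of the divisor-counting idea used in the solution above, assuming the helper names from the cleaned-up version (count_divisors, triangle_number_generator). For n = p1^e1 * p2^e2 * ... the divisor count is (e1 + 1)(e2 + 1)...; for example 28 = 2^2 * 7 has (2 + 1)(1 + 1) = 6 divisors, and 28 is also the first triangle number with more than five divisors.

assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28
assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28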
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-character uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
285
def climb_stairs(number_of_steps: int) -> int:
    # Distinct ways to climb a staircase taking one or two steps at a time.
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
285
1
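The staircase counter above computes a shifted Fibonacci sequence, since every route up n steps ends with either a single or a double step: ways(n) = ways(n - 1) + ways(n - 2), with ways(1) = 1 and ways(2) = 2. A tiny recursive cross-check (exponential, so small n only; ways_brute is a name invented for this sketch):

def ways_brute(n: int) -> int:
    # one step -> 1 way, two steps -> 2 ways, otherwise sum of the two subproblems
    return n if n <= 2 else ways_brute(n - 1) + ways_brute(n - 2)

assert [ways_brute(n) for n in range(1, 8)] == [1, 2, 3, 5, 8, 13, 21]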
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
37
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
2
0
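A hand-worked trace of minimax on the sample scores in the solution above. With eight leaves the tree has height log2(8) = 3: the level just above the leaves maximizes, the next level minimizes, and the root maximizes again, so the optimal value is 65.

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
level = [max(scores[i], scores[i + 1]) for i in range(0, 8, 2)]  # -> [90, 33, 65, 34423]
level = [min(level[i], level[i + 1]) for i in range(0, 4, 2)]    # -> [33, 65]
print(max(level))  # 65, matching minimax(0, 0, True, scores, height)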
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __snake_case : str ='▁' __snake_case : Tuple =get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase): '''simple docstring''' snake_case_ =BigBirdTokenizer snake_case_ =BigBirdTokenizerFast snake_case_ =True snake_case_ =True def lowerCAmelCase__ (self ) -> str: """simple docstring""" super().setUp() lowerCAmelCase__ : str = self.tokenizer_class(__lowerCamelCase ,keep_accents=__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = '''<s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) ,__lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) ,__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" lowerCAmelCase__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'''<unk>''' ) self.assertEqual(vocab_keys[1] ,'''<s>''' ) self.assertEqual(vocab_keys[-1] ,'''[MASK]''' ) self.assertEqual(len(__lowerCamelCase ) ,10_04 ) def lowerCAmelCase__ (self ) -> int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size ,10_00 ) def lowerCAmelCase__ (self ) -> str: """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase__ : List[str] = self.get_tokenizer() lowerCAmelCase__ : str = self.get_rust_tokenizer() lowerCAmelCase__ : Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowerCAmelCase__ : Optional[Any] = tokenizer.tokenize(__lowerCamelCase ) lowerCAmelCase__ : Any = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : List[str] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) lowerCAmelCase__ : Dict = rust_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() lowerCAmelCase__ : Optional[int] = tokenizer.encode(__lowerCamelCase ) lowerCAmelCase__ : List[str] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[Any] = BigBirdTokenizer(__lowerCamelCase ,keep_accents=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,[2_85, 46, 10, 1_70, 3_82] ,) lowerCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowerCamelCase ,[ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', 
SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] ,) lowerCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,) lowerCAmelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase ,[ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] ,) @cached_property def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : str = '''Hello World!''' lowerCAmelCase__ : List[str] = [65, 1_85_36, 22_60, 1_01, 66] self.assertListEqual(__lowerCamelCase ,self.big_tokenizer.encode(__lowerCamelCase ) ) @slow def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Optional[int] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off lowerCAmelCase__ : List[str] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231 # fmt: on self.assertListEqual(__lowerCamelCase ,self.big_tokenizer.encode(__lowerCamelCase ) ) @require_torch @slow def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowerCAmelCase__ : Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10] lowerCAmelCase__ : int = ''' '''.join(__lowerCamelCase ) lowerCAmelCase__ : int = self.big_tokenizer.encode_plus(__lowerCamelCase ,return_tensors='''pt''' ,return_token_type_ids=__lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] ,return_tensors='''pt''' ,return_token_type_ids=__lowerCamelCase ) lowerCAmelCase__ : str = BigBirdConfig(attention_type='''original_full''' ) lowerCAmelCase__ : Any = BigBirdModel(__lowerCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__lowerCamelCase ) model(**__lowerCamelCase ) @slow def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : int = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowerCAmelCase__ : Union[str, Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 
3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase ,model_name='''google/bigbird-roberta-base''' ,revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' ,)
94
from math import factorial


def solution(num: int = 100) -> int:
    # Sum of the digits in the number num!
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
94
1
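A worked example for the digit-sum-of-a-factorial solution above: 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.

from math import factorial

assert sum(map(int, str(factorial(10)))) == 27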
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging a_ : List[Any] = logging.get_logger(__name__) a_ : Any = """▁""" a_ : Dict = {"""vocab_file""": """sentencepiece.bpe.model"""} a_ : Dict = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model""" ), } } a_ : str = { """facebook/nllb-200-distilled-600M""": 1024, } # fmt: off a_ : List[str] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", 
"""tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = ["input_ids", "attention_mask"] _lowerCamelCase = [] _lowerCamelCase = [] def __init__( self , UpperCamelCase , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="</s>" , UpperCamelCase="<s>" , UpperCamelCase="<unk>" , UpperCamelCase="<pad>" , UpperCamelCase="<mask>" , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase = None , UpperCamelCase=None , UpperCamelCase=False , **UpperCamelCase , ): """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase_ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs lowerCamelCase_ = legacy_behaviour super().__init__( bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , tokenizer_file=UpperCamelCase , src_lang=UpperCamelCase , tgt_lang=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase , **UpperCamelCase , ) lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase ) ) lowerCamelCase_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token lowerCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCamelCase_ = 1 lowerCamelCase_ = len(self.sp_model ) lowerCamelCase_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase ) } lowerCamelCase_ = {v: k for k, v in self.lang_code_to_id.items()} lowerCamelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowerCamelCase_ = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) lowerCamelCase_ = src_lang if src_lang is not None else "eng_Latn" lowerCamelCase_ = self.lang_code_to_id[self._src_lang] lowerCamelCase_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): """simple docstring""" lowerCamelCase_ = self.__dict__.copy() lowerCamelCase_ = None lowerCamelCase_ = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase_ = {} lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def snake_case ( self ): """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def snake_case ( self ): """simple docstring""" return self._src_lang @src_lang.setter def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def snake_case ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) lowerCamelCase_ = [1] * len(self.prefix_tokens ) lowerCamelCase_ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(UpperCamelCase )) + suffix_ones return prefix_ones + ([0] * len(UpperCamelCase )) + ([0] * len(UpperCamelCase )) + suffix_ones def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) lowerCamelCase_ = src_lang lowerCamelCase_ = self(UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) lowerCamelCase_ = self.convert_tokens_to_ids(UpperCamelCase ) lowerCamelCase_ = tgt_lang_id return inputs def snake_case ( self ): """simple docstring""" lowerCamelCase_ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case ( self , UpperCamelCase ): """simple docstring""" return self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) def snake_case ( self , UpperCamelCase ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCamelCase_ = self.sp_model.PieceToId(UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return 
spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case ( self , UpperCamelCase ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "".join(UpperCamelCase ).replace(UpperCamelCase , " " ).strip() return out_string def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" if not os.path.isdir(UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ = os.path.join( UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , "wb" ) as fi: lowerCamelCase_ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,) def snake_case ( self , UpperCamelCase , UpperCamelCase = "eng_Latn" , UpperCamelCase = None , UpperCamelCase = "fra_Latn" , **UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = src_lang lowerCamelCase_ = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase , UpperCamelCase , **UpperCamelCase ) def snake_case ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def snake_case ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.lang_code_to_id[src_lang] if self.legacy_behaviour: lowerCamelCase_ = [] lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code] else: lowerCamelCase_ = [self.cur_lang_code] lowerCamelCase_ = [self.eos_token_id] def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.lang_code_to_id[lang] if self.legacy_behaviour: lowerCamelCase_ = [] lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code] else: lowerCamelCase_ = [self.cur_lang_code] lowerCamelCase_ = [self.eos_token_id]
55
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" lowercase = GPTSanJapaneseTokenizer lowercase = False lowercase = {"do_clean_text": False, "add_prefix_space": False} def lowerCamelCase ( self : str ): super().setUp() # fmt: off snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀 snake_case__ : List[Any] = {"""unk_token""": """<unk>"""} snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(snake_case_ ) ) def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowerCamelCase ( self : Any , snake_case_ : str ): snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀""" snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀""" return input_text, output_text def lowerCamelCase ( self : Any , snake_case_ : Dict ): snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ ) snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ ) return text, ids def lowerCamelCase ( self : Optional[Any] ): pass # TODO add if relevant def lowerCamelCase ( self : Union[str, Any] ): pass # TODO add if relevant def lowerCamelCase ( self : List[str] ): pass # TODO add if relevant def lowerCamelCase ( self : Dict ): snake_case__ : Optional[Any] = self.get_tokenizer() # Testing tokenization snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。""" snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""] snake_case__ : Dict = tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) # Testing conversion to ids without special tokens snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) # Testing conversion to ids with special tokens snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token] snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Union[str, Any] = self.get_tokenizer() # 
Testing tokenization snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。""" snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。""" snake_case__ : Any = tokenizer.encode(snake_case_ ) snake_case__ : int = tokenizer.decode(snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization snake_case__ : Tuple = """こんにちは、世界。""" snake_case__ : Optional[Any] = """こんばんは、㔺界。😀""" snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀""" snake_case__ : Dict = tokenizer.encode(prefix_text + input_text ) snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ ) snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ ) snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ ) snake_case__ : str = tokenizer.decode(snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization snake_case__ : Dict = """こんにちは、世界。""" snake_case__ : Optional[int] = """こんばんは、㔺界。😀""" snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2 snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2 snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1) snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0] snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1) snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids self.assertListEqual(snake_case_ , snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" ) snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" ) snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" ) self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) ) self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) ) self.assertNotEqual(snake_case_ , snake_case_ ) self.assertNotEqual(snake_case_ , snake_case_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def lowerCamelCase ( self : Any ): snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]] snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ ) snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ ) # fmt: off snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]] snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 
0, 0]] snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , snake_case_ ) self.assertListEqual(x_token.token_type_ids , snake_case_ ) self.assertListEqual(x_token.attention_mask , snake_case_ ) self.assertListEqual(x_token_a.input_ids , snake_case_ ) self.assertListEqual(x_token_a.token_type_ids , snake_case_ ) self.assertListEqual(x_token_a.attention_mask , snake_case_ ) def lowerCamelCase ( self : Any ): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def lowerCamelCase ( self : List[str] ): # tokenizer has no padding token pass
35
0
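A minimal sketch, not the tokenizer's public API, of the fairseq/SentencePiece id alignment that the NLLB tokenizer above maintains: ids 0-3 are hard-coded specials, ordinary SentencePiece ids are shifted by a fixed offset of 1, and SentencePiece id 0 (its <unk>) maps to the tokenizer-level unknown id instead of being shifted.

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def spm_to_fairseq(spm_id: int) -> int:
    # SentencePiece reserves id 0 for <unk>, so 0 maps to the fairseq <unk> id
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert spm_to_fairseq(0) == 3  # SentencePiece <unk> -> fairseq <unk>
assert spm_to_fairseq(5) == 6  # ordinary pieces shift by the offset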
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class A ( unittest.TestCase ): def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any ) -> Dict: """simple docstring""" self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" _a = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" _a = None ops.enable_eager_execution_internal() _a = tf.config.list_physical_devices('''CPU''' ) if len(SCREAMING_SNAKE_CASE_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) _a = tf.config.list_logical_devices(device_type='''CPU''' ) _a = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): _a = GradientAccumulator() _a = tf.Variable([4.0, 3.0] ) _a = create_optimizer(5e-5 , 10 , 5 ) _a = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE_ ) def accumulate_on_replica(lowerCAmelCase_ : Dict ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ): with strategy.scope(): _a = strategy.experimental_local_results(SCREAMING_SNAKE_CASE_ ) local_variables[0].assign(SCREAMING_SNAKE_CASE_ ) local_variables[1].assign(SCREAMING_SNAKE_CASE_ ) strategy.run(SCREAMING_SNAKE_CASE_ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(SCREAMING_SNAKE_CASE_ ) def _check_local_values(lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ): _a = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE_ , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE_ , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
365
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer _snake_case : Any = logging.get_logger(__name__) _snake_case : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case : List[str] = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } _snake_case : int = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, 'bert-base-german-cased': 512, 'bert-large-uncased-whole-word-masking': 512, 'bert-large-cased-whole-word-masking': 512, 'bert-large-uncased-whole-word-masking-finetuned-squad': 512, 'bert-large-cased-whole-word-masking-finetuned-squad': 512, 'bert-base-cased-finetuned-mrpc': 512, 'bert-base-german-dbmdz-cased': 512, 'bert-base-german-dbmdz-uncased': 512, 'TurkuNLP/bert-base-finnish-cased-v1': 512, 'TurkuNLP/bert-base-finnish-uncased-v1': 512, 'wietsedv/bert-base-dutch-cased': 512, } _snake_case : int = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class A ( _a ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = BertTokenizer def __init__( self : str , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Tuple="[PAD]" , lowerCAmelCase_ : Tuple="[CLS]" , lowerCAmelCase_ : Optional[int]="[MASK]" , lowerCAmelCase_ : Dict=True , 
lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , ) _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars ): _a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) ) _a = do_lower_case _a = strip_accents _a = tokenize_chinese_chars _a = normalizer_class(**lowerCAmelCase_ ) _a = do_lower_case def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int=None ) -> List[str]: """simple docstring""" _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" _a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
179
0
"""Project Euler problem 82: minimal path sum through a matrix, moving up, down and right."""

import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum from the left column to the right column of the matrix."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [[int(element) for element in line.split(",")] for line in input_file.readlines()]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first sweep: enter the column from the left
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # second sweep: allow paths that move down (cost from the cell above)
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # third sweep: allow paths that move up (cost from the cell below)
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
344
"""Atom14/atom37 mask construction utilities (OpenFold-style data transforms)."""

from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
344
1
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase : str = image.size lowercase : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowercase : Tuple = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) lowercase : Dict = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0 lowercase : str = image[None].transpose(0 , 3 , 1 , 2 ) lowercase : List[Any] = torch.from_numpy(_UpperCAmelCase ) return 2.0 * image - 1.0 class a__ ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Optional[int], lowerCAmelCase : VQModel, lowerCAmelCase : UNetaDModel, lowerCAmelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ) -> Optional[int]: super().__init__() self.register_modules(vqvae=lowerCAmelCase, unet=lowerCAmelCase, scheduler=lowerCAmelCase ) @torch.no_grad() def __call__( self : List[Any], lowerCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None, lowerCAmelCase : Optional[int] = 1, lowerCAmelCase : Optional[int] = 100, lowerCAmelCase : Optional[float] = 0.0, lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None, lowerCAmelCase : Optional[str] = "pil", lowerCAmelCase : bool = True, ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(lowerCAmelCase, PIL.Image.Image ): lowercase : Dict = 1 elif isinstance(lowerCAmelCase, torch.Tensor ): lowercase : str = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase )}''' ) if isinstance(lowerCAmelCase, PIL.Image.Image ): lowercase : Dict = preprocess(lowerCAmelCase ) lowercase : int = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image lowercase : Tuple = (batch_size, self.unet.config.in_channels // 2, height, width) lowercase : Tuple = next(self.unet.parameters() ).dtype lowercase : Optional[Any] = randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=lowerCAmelCase ) lowercase : int = image.to(device=self.device, dtype=lowerCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowerCAmelCase, device=self.device ) lowercase : Tuple = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler lowercase : Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowercase : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowercase : Tuple = {} if accepts_eta: lowercase : Any = eta for t in self.progress_bar(lowerCAmelCase ): # concat latents and low resolution image in the channel dimension. 
lowercase : Optional[Any] = torch.cat([latents, image], dim=1 ) lowercase : Optional[Any] = self.scheduler.scale_model_input(lowerCAmelCase, lowerCAmelCase ) # predict the noise residual lowercase : str = self.unet(lowerCAmelCase, lowerCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 lowercase : List[str] = self.scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ).prev_sample # decode the image latents with the VQVAE lowercase : str = self.vqvae.decode(lowerCAmelCase ).sample lowercase : Union[str, Any] = torch.clamp(lowerCAmelCase, -1.0, 1.0 ) lowercase : int = image / 2 + 0.5 lowercase : List[str] = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": lowercase : str = self.numpy_to_pil(lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase )
359
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase: str = logging.get_logger(__name__) _UpperCamelCase: Union[str, Any] = { 'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json', } class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = 'mgp-str' def __init__( self : Tuple, lowerCAmelCase : str=[32, 128], lowerCAmelCase : List[Any]=4, lowerCAmelCase : Union[str, Any]=3, lowerCAmelCase : Union[str, Any]=27, lowerCAmelCase : Union[str, Any]=38, lowerCAmelCase : Tuple=50257, lowerCAmelCase : Dict=30522, lowerCAmelCase : Optional[int]=768, lowerCAmelCase : Optional[int]=12, lowerCAmelCase : Optional[int]=12, lowerCAmelCase : Union[str, Any]=4.0, lowerCAmelCase : Any=True, lowerCAmelCase : Optional[int]=False, lowerCAmelCase : Optional[int]=1e-5, lowerCAmelCase : List[str]=0.0, lowerCAmelCase : Optional[Any]=0.0, lowerCAmelCase : List[str]=0.0, lowerCAmelCase : Dict=False, lowerCAmelCase : Union[str, Any]=0.02, **lowerCAmelCase : Optional[int], ) -> List[Any]: super().__init__(**lowerCAmelCase ) lowercase : int = image_size lowercase : Dict = patch_size lowercase : List[str] = num_channels lowercase : Union[str, Any] = max_token_length lowercase : str = num_character_labels lowercase : Tuple = num_bpe_labels lowercase : Tuple = num_wordpiece_labels lowercase : Optional[Any] = hidden_size lowercase : Tuple = num_hidden_layers lowercase : Optional[Any] = num_attention_heads lowercase : Tuple = mlp_ratio lowercase : Union[str, Any] = distilled lowercase : List[str] = layer_norm_eps lowercase : Optional[int] = drop_rate lowercase : Tuple = qkv_bias lowercase : int = attn_drop_rate lowercase : Any = drop_path_rate lowercase : Optional[Any] = output_aa_attentions lowercase : Optional[Any] = initializer_range
53
0
"""Project Euler problem 2: sum of the even-valued Fibonacci terms not exceeding n."""


def solution(n: int = 4000000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
37
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''pytorch''', '''script''': '''run_ddp.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf_dist.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7}, }, ] ) class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,) assert hasattr(self ,"""env""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}""" # distributed data settings lowerCAmelCase__ : Any = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__UpperCAmelCase ,instance_count=__UpperCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__UpperCAmelCase ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__UpperCAmelCase ,py_version="""py36""" ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: # create estimator lowerCAmelCase__ : List[Any] = self.create_estimator(__UpperCAmelCase ) # run training estimator.fit() # result dataframe lowerCAmelCase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ : List[str] = ( Session().describe_training_job(estimator.latest_training_job.name 
).get("""TrainingTimeInSeconds""" ,99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
37
1
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single workflow job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run, paging 100 jobs at a time."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # sort jobs by duration, longest first
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
196
from ..utils import DummyObject, requires_backends class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ): '''simple 
docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> int: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase 
) -> Any: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Dict: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]: requires_backends(__lowerCAmelCase , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def 
__lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : int = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , 
["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , 
*SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , 
*SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , 
**SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): 
'''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] )
196
1
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCamelCase : '''simple docstring''' @staticmethod def UpperCamelCase__ ( *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Any ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' _A : Optional[int] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def UpperCamelCase__ ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __SCREAMING_SNAKE_CASE : Any = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = object_detector(examples[0] , threshold=0.0 ) __SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCAmelCase__ ) self.assertGreater(lowerCAmelCase__ , 0 ) self.assertEqual( lowerCAmelCase__ , [ { """score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ ), """box""": {"""xmin""": ANY(lowerCAmelCase__ ), """ymin""": ANY(lowerCAmelCase__ ), """xmax""": ANY(lowerCAmelCase__ ), """ymax""": ANY(lowerCAmelCase__ )}, } for i in range(lowerCAmelCase__ ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def UpperCamelCase__ ( self : Optional[int] ): """simple docstring""" pass @require_torch def UpperCamelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __SCREAMING_SNAKE_CASE : int = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}}, {"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}}, {"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}}, {"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}}, {"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}}, {"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}}, {"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}}, {"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, 
"""xmax""": 9_3, """ymax""": 2_9_7}}, {"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}}, ] , ) __SCREAMING_SNAKE_CASE : List[Any] = object_detector( [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}}, {"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}}, {"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}}, {"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}}, {"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}}, {"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}}, {"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}}, {"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}}, {"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}}, ] ] , ) @require_torch @slow def UpperCamelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = pipeline("""zero-shot-object-detection""" ) __SCREAMING_SNAKE_CASE : List[str] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}}, {"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}}, {"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}}, {"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}}, {"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}}, ] , ) __SCREAMING_SNAKE_CASE : Dict = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}}, {"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}}, {"""score""": 0.25_37, """label""": """cat""", """box""": 
{"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}}, {"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}}, {"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}}, ], [ {"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}}, {"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}}, {"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}}, {"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}}, {"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def UpperCamelCase__ ( self : Any ): """simple docstring""" pass @require_torch @slow def UpperCamelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = 0.2 __SCREAMING_SNAKE_CASE : Dict = pipeline("""zero-shot-object-detection""" ) __SCREAMING_SNAKE_CASE : Any = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}}, {"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}}, {"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}}, ] , ) @require_torch @slow def UpperCamelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 2 __SCREAMING_SNAKE_CASE : int = pipeline("""zero-shot-object-detection""" ) __SCREAMING_SNAKE_CASE : int = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}}, {"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}}, ] , )
112
'''simple docstring''' import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa UpperCamelCase__ : str = logging.getLogger(__name__) class _UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A : Union[str, Any] = '''summarization''' _A : Optional[Any] = ['''loss'''] _A : Tuple = ROUGE_KEYS _A : int = '''rouge2''' def __init__( self : int , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[str] ): """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: __SCREAMING_SNAKE_CASE : Any = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , mode=self.mode , **lowerCAmelCase__ ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) __SCREAMING_SNAKE_CASE : int = Path(self.output_dir ) / """metrics.json""" __SCREAMING_SNAKE_CASE : Optional[Any] = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) __SCREAMING_SNAKE_CASE : Optional[int] = 0 __SCREAMING_SNAKE_CASE : List[Any] = defaultdict(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = self.config.model_type __SCREAMING_SNAKE_CASE : List[Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size __SCREAMING_SNAKE_CASE : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } __SCREAMING_SNAKE_CASE : List[Any] = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } __SCREAMING_SNAKE_CASE : Any = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} __SCREAMING_SNAKE_CASE : Any = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) __SCREAMING_SNAKE_CASE : Any = get_git_info()["""repo_sha"""] __SCREAMING_SNAKE_CASE : Any = hparams.num_workers 
__SCREAMING_SNAKE_CASE : Tuple = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang] __SCREAMING_SNAKE_CASE : Any = self.decoder_start_token_id __SCREAMING_SNAKE_CASE : Optional[int] = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset ) __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Any = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: __SCREAMING_SNAKE_CASE : Optional[int] = self.hparams.eval_max_gen_length else: __SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.max_length __SCREAMING_SNAKE_CASE : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def UpperCamelCase__ ( self : str , lowerCAmelCase__ : Dict[str, torch.Tensor] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(lowerCAmelCase__ , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) __SCREAMING_SNAKE_CASE : Optional[int] = True return readable_batch def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : Any , **lowerCAmelCase__ : Optional[Any] ): """simple docstring""" return self.model(lowerCAmelCase__ , **lowerCAmelCase__ ) def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode( lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) return lmap(str.strip , lowerCAmelCase__ ) def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.pad_token_id __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = batch["""input_ids"""], batch["""attention_mask"""] __SCREAMING_SNAKE_CASE : Tuple = batch["""labels"""] if isinstance(self.model , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : str = self.model._shift_right(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCAmelCase__ , lowerCAmelCase__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero __SCREAMING_SNAKE_CASE : Tuple = decoder_input_ids self.save_readable_batch(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = self(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id __SCREAMING_SNAKE_CASE : Tuple = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ ) assert lm_logits.shape[-1] == self.vocab_size __SCREAMING_SNAKE_CASE : List[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: __SCREAMING_SNAKE_CASE : List[Any] = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = label_smoothed_nll_loss( lowerCAmelCase__ , lowerCAmelCase__ , self.hparams.label_smoothing , ignore_index=lowerCAmelCase__ ) return (loss,) @property def UpperCamelCase__ ( 
self : List[Any] ): """simple docstring""" return self.tokenizer.pad_token_id def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._step(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = dict(zip(self.loss_names , lowerCAmelCase__ ) ) # tokens per batch __SCREAMING_SNAKE_CASE : Optional[int] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() __SCREAMING_SNAKE_CASE : str = batch["""input_ids"""].shape[0] __SCREAMING_SNAKE_CASE : str = batch["""input_ids"""].eq(self.pad ).sum() __SCREAMING_SNAKE_CASE : Optional[int] = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ): """simple docstring""" return self._generative_step(lowerCAmelCase__ ) def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]="val" ): """simple docstring""" self.step_count += 1 __SCREAMING_SNAKE_CASE : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} __SCREAMING_SNAKE_CASE : List[Any] = losses["""loss"""] __SCREAMING_SNAKE_CASE : int = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } __SCREAMING_SNAKE_CASE : List[Any] = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) __SCREAMING_SNAKE_CASE : torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = {F"{prefix}_avg_{k}": x for k, x in losses.items()} __SCREAMING_SNAKE_CASE : Optional[int] = self.step_count self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path __SCREAMING_SNAKE_CASE : int = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, F"{prefix}_loss": loss, F"{prefix}_{self.val_metric}": metric_tensor, } def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ): """simple docstring""" return calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') __SCREAMING_SNAKE_CASE : List[str] = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowerCAmelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = (time.time() - ta) / batch["""input_ids"""].shape[0] __SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(batch["""labels"""] ) __SCREAMING_SNAKE_CASE : Optional[Any] = self._step(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = dict(zip(self.loss_names , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Dict = self.calc_generative_metrics(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = np.mean(lmap(lowerCAmelCase__ , 
lowerCAmelCase__ ) ) base_metrics.update(gen_time=lowerCAmelCase__ , gen_len=lowerCAmelCase__ , preds=lowerCAmelCase__ , target=lowerCAmelCase__ , **lowerCAmelCase__ ) return base_metrics def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ): """simple docstring""" return self._generative_step(lowerCAmelCase__ ) def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : int ): """simple docstring""" return self.validation_epoch_end(lowerCAmelCase__ , prefix="""test""" ) def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.n_obs[type_path] __SCREAMING_SNAKE_CASE : str = self.target_lens[type_path] __SCREAMING_SNAKE_CASE : str = self.dataset_class( self.tokenizer , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , **self.dataset_kwargs , ) return dataset def UpperCamelCase__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dataset(lowerCAmelCase__ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": __SCREAMING_SNAKE_CASE : Optional[int] = dataset.make_sortish_sampler(lowerCAmelCase__ , distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": __SCREAMING_SNAKE_CASE : Any = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCAmelCase__ , batch_sampler=lowerCAmelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , ) def UpperCamelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase__ ) return dataloader def UpperCamelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def UpperCamelCase__ ( self : Tuple ): """simple docstring""" return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def UpperCamelCase__ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ): """simple docstring""" BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ ) add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ ) parser.add_argument( """--max_source_length""" , default=1_0_2_4 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=5_6 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCAmelCase__ ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCAmelCase__ ) parser.add_argument("""--max_tokens_per_batch""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ ) parser.add_argument("""--logger_name""" , type=lowerCAmelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=lowerCAmelCase__ , default=5_0_0 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=lowerCAmelCase__ , default="""summarization""" , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ ) parser.add_argument("""--src_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ ) parser.add_argument("""--tgt_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ ) parser.add_argument("""--eval_beams""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ ) parser.add_argument( """--val_metric""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So""" """ val_check_interval will effect it.""" ) , ) return parser class _UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A : List[Any] = '''translation''' _A : int = ['''loss'''] _A : Union[str, Any] = ['''bleu'''] _A : Dict = '''bleu''' def __init__( self : Any , lowerCAmelCase__ : int , **lowerCAmelCase__ : Any ): """simple docstring""" super().__init__(lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = hparams.src_lang __SCREAMING_SNAKE_CASE : Dict = hparams.tgt_lang def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ): """simple docstring""" return calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__ ) def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: str=None ): Path(args.output_dir ).mkdir(exist_ok=_lowerCamelCase ) check_output_dir(_lowerCamelCase , expected_items=3 ) if model is None: if "summarization" in args.task: __SCREAMING_SNAKE_CASE : SummarizationModule = SummarizationModule(_lowerCamelCase ) else: __SCREAMING_SNAKE_CASE : SummarizationModule = TranslationModule(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): __SCREAMING_SNAKE_CASE : str = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger __SCREAMING_SNAKE_CASE : Any = os.environ.get("""WANDB_PROJECT""" , _lowerCamelCase ) __SCREAMING_SNAKE_CASE : str = WandbLogger(name=model.output_dir.name , project=_lowerCamelCase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger __SCREAMING_SNAKE_CASE : Optional[int] = WandbLogger(name=model.output_dir.name , project=F"hf_{dataset}" ) if args.early_stopping_patience >= 0: __SCREAMING_SNAKE_CASE : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Dict = args.val_metric == """loss""" __SCREAMING_SNAKE_CASE : pl.Trainer = generic_train( _lowerCamelCase , _lowerCamelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , _lowerCamelCase ) , early_stopping_callback=_lowerCamelCase , logger=_lowerCamelCase , ) pickle_save(model.hparams , model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model __SCREAMING_SNAKE_CASE : Optional[int] = """""" __SCREAMING_SNAKE_CASE : Any = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=_lowerCamelCase ) ) if checkpoints: __SCREAMING_SNAKE_CASE : List[Any] = checkpoints[-1] __SCREAMING_SNAKE_CASE : str = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() UpperCamelCase__ : Dict = pl.Trainer.add_argparse_args(parser) UpperCamelCase__ : List[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) UpperCamelCase__ : List[str] = parser.parse_args() main(args)
112
1
'''simple docstring'''

from string import ascii_lowercase, ascii_uppercase


def _SCREAMING_SNAKE_CASE(sentence: str) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
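The helper maps only the first character through the lowercase-to-uppercase table; a couple of quick checks:

assert _SCREAMING_SNAKE_CASE("hello world") == "Hello world"
assert _SCREAMING_SNAKE_CASE("123 hey") == "123 hey"  # non-letters fall through .get()
assert _SCREAMING_SNAKE_CASE("") == ""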
359
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : List[str] = IFInpaintingSuperResolutionPipeline __lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} __lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) __lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCAmelCase_ ( self ) -> Any: return self._get_superresolution_dummy_components() def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> List[Any]: if str(__UpperCAmelCase ).startswith("""mps""" ): lowerCAmelCase__ : Any = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ : Dict = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = floats_tensor((1, 3, 16, 16) ,rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ : Any = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def UpperCAmelCase_ ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCAmelCase_ ( self ) -> Optional[int]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" ) def UpperCAmelCase_ ( self ) -> List[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCAmelCase_ ( self ) -> str: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCAmelCase_ ( self ) -> List[Any]: self._test_save_load_local() def UpperCAmelCase_ ( self ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 ,)
184
0
"""simple docstring""" from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : List[Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]} return Dataset.from_dict(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Any = self._create_example_records() UpperCAmelCase : int = Dataset.from_list(_SCREAMING_SNAKE_CASE ) self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] ) for i, r in enumerate(_SCREAMING_SNAKE_CASE ): self.assertDictEqual(_SCREAMING_SNAKE_CASE , example_records[i] ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : List[str] = self._create_example_records() UpperCAmelCase : List[str] = Dataset.from_list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: # checks what happens with missing columns '''simple docstring''' UpperCAmelCase : Optional[int] = [{"""col_1""": 1}, {"""col_2""": """x"""}] UpperCAmelCase : List[str] = Dataset.from_list(_SCREAMING_SNAKE_CASE ) self.assertDictEqual(dset[0] , {"""col_1""": 1} ) self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns def SCREAMING_SNAKE_CASE ( self ) -> Tuple: # checks if the type can be inferred from the second record '''simple docstring''' UpperCAmelCase : Union[str, Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}] UpperCAmelCase : List[Any] = Dataset.from_list(_SCREAMING_SNAKE_CASE ) self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Dict = Dataset.from_list([] ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 0 ) self.assertListEqual(dset.column_names , [] )
109
'''simple docstring'''

from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
0
'''simple docstring''' import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase__( __UpperCamelCase: Union[str, Any] ): """simple docstring""" return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: Dict ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE : Tuple = key.replace('heads.cmd.mim_head.cls.predictions' ,'mmm_image_head' ) SCREAMING_SNAKE_CASE : Dict = key.replace('heads.cmd.mlm_head.cls.predictions' ,'mmm_text_head' ) SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('heads.cmd.itm_head.cls' ,'itm_head' ) SCREAMING_SNAKE_CASE : List[str] = key.replace('heads.cmd.itm_head.pooler' ,'itm_head.pooler' ) SCREAMING_SNAKE_CASE : int = key.replace('heads.cmd.clip_head.logit_scale' ,'flava.logit_scale' ) SCREAMING_SNAKE_CASE : Tuple = key.replace('heads.fairseq_mlm.cls.predictions' ,'mlm_head' ) SCREAMING_SNAKE_CASE : str = key.replace('heads.imagenet.mim_head.cls.predictions' ,'mim_head' ) SCREAMING_SNAKE_CASE : Any = key.replace('mm_text_projection' ,'flava.text_to_mm_projection' ) SCREAMING_SNAKE_CASE : Optional[int] = key.replace('mm_image_projection' ,'flava.image_to_mm_projection' ) SCREAMING_SNAKE_CASE : Any = key.replace('image_encoder.module' ,'flava.image_model' ) SCREAMING_SNAKE_CASE : Optional[int] = key.replace('text_encoder.module' ,'flava.text_model' ) SCREAMING_SNAKE_CASE : List[Any] = key.replace('mm_encoder.module.encoder.cls_token' ,'flava.multimodal_model.cls_token' ) SCREAMING_SNAKE_CASE : List[Any] = key.replace('mm_encoder.module' ,'flava.multimodal_model' ) SCREAMING_SNAKE_CASE : Any = key.replace('text_projection' ,'flava.text_projection' ) SCREAMING_SNAKE_CASE : str = key.replace('image_projection' ,'flava.image_projection' ) SCREAMING_SNAKE_CASE : Optional[int] = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE : Optional[Any] = value return upgrade @torch.no_grad() def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Tuple ,__UpperCamelCase: List[Any]=None ): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : Optional[Any] = FlavaConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[int] = FlavaConfig() SCREAMING_SNAKE_CASE : int = FlavaForPreTraining(__UpperCamelCase ).eval() SCREAMING_SNAKE_CASE : Tuple = convert_dalle_checkpoint(__UpperCamelCase ,__UpperCamelCase ,save_checkpoint=__UpperCamelCase ) if os.path.exists(__UpperCamelCase ): SCREAMING_SNAKE_CASE : str = torch.load(__UpperCamelCase ,map_location='cpu' ) else: SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(__UpperCamelCase ,map_location='cpu' ) SCREAMING_SNAKE_CASE : Optional[Any] = upgrade_state_dict(__UpperCamelCase ,__UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = hf_model.state_dict() SCREAMING_SNAKE_CASE : List[Any] = count_parameters(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == 
"__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCamelCase_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
246
'''simple docstring'''


def binomial_coefficient(n: int, k: int) -> int:
    """simple docstring"""
    result = 1  # holds the value built up so far
    # Since C(n, k) = C(n, n - k), use the smaller k.
    if k > (n - k):
        k = n - k
    # Calculate C(n, k) iteratively, staying in integers.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """simple docstring"""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """simple docstring"""
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """simple docstring"""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
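A worked check of the formulas above for node_count = 3: binomial_coefficient(6, 3) = 20, so catalan_number(3) = 20 // (3 + 1) = 5 distinct binary search trees, and binary_tree_count(3) = 5 * 3! = 30 labeled binary trees.

assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5      # tree shapes, i.e. BSTs on 3 keys
assert binary_tree_count(3) == 30  # 5 shapes x 3! key orderings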
246
1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A__ : Optional[int] = logging.get_logger(__name__) class lowercase__ ( snake_case__ ): _UpperCAmelCase :Tuple = ["input_features", "is_longer"] def __init__( self : Any , snake_case__ : Union[str, Any]=64 , snake_case__ : Dict=4_8000 , snake_case__ : List[Any]=480 , snake_case__ : Union[str, Any]=10 , snake_case__ : Dict=1024 , snake_case__ : Dict=0.0 , snake_case__ : Tuple=False , snake_case__ : float = 0 , snake_case__ : float = 1_4000 , snake_case__ : int = None , snake_case__ : str = "fusion" , snake_case__ : str = "repeatpad" , **snake_case__ : Tuple , ): super().__init__( feature_size=snake_case__ , sampling_rate=snake_case__ , padding_value=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , ) lowerCamelCase_ : Optional[int] =top_db lowerCamelCase_ : Tuple =truncation lowerCamelCase_ : int =padding lowerCamelCase_ : List[str] =fft_window_size lowerCamelCase_ : Union[str, Any] =(fft_window_size >> 1) + 1 lowerCamelCase_ : Optional[int] =hop_length lowerCamelCase_ : Dict =max_length_s lowerCamelCase_ : Dict =max_length_s * sampling_rate lowerCamelCase_ : int =sampling_rate lowerCamelCase_ : int =frequency_min lowerCamelCase_ : List[Any] =frequency_max lowerCamelCase_ : int =mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case__ , min_frequency=snake_case__ , max_frequency=snake_case__ , sampling_rate=snake_case__ , norm=snake_case__ , mel_scale="htk" , ) lowerCamelCase_ : Optional[int] =mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case__ , min_frequency=snake_case__ , max_frequency=snake_case__ , sampling_rate=snake_case__ , norm="slaney" , mel_scale="slaney" , ) def UpperCAmelCase__ ( self : Optional[int] ): lowerCamelCase_ : Dict =copy.deepcopy(self.__dict__ ) lowerCamelCase_ : Optional[Any] =self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def UpperCAmelCase__ ( self : str , snake_case__ : np.array , snake_case__ : Optional[np.array] = None ): lowerCamelCase_ : int =spectrogram( snake_case__ , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case__ , log_mel="dB" , ) return log_mel_spectrogram.T def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): lowerCamelCase_ : str =np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCamelCase_ : Optional[int] =[0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCamelCase_ : int =[0] # randomly choose index for each part lowerCamelCase_ : List[str] =np.random.choice(ranges[0] ) lowerCamelCase_ : List[Any] =np.random.choice(ranges[1] ) lowerCamelCase_ : str =np.random.choice(ranges[2] ) lowerCamelCase_ : Optional[int] =mel[idx_front : idx_front + chunk_frames, :] lowerCamelCase_ : Tuple =mel[idx_middle : idx_middle + chunk_frames, :] lowerCamelCase_ : int =mel[idx_back : idx_back + 
chunk_frames, :] lowerCamelCase_ : Tuple =torch.tensor(mel[None, None, :] ) lowerCamelCase_ : Optional[int] =torch.nn.functional.interpolate( snake_case__ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=snake_case__ ) lowerCamelCase_ : Union[str, Any] =mel_shrink[0][0].numpy() lowerCamelCase_ : Optional[Any] =np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def UpperCAmelCase__ ( self : str , snake_case__ : np.array , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : List[str] ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowerCamelCase_ : Optional[Any] =True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCamelCase_ : str =len(snake_case__ ) - max_length lowerCamelCase_ : int =np.random.randint(0 , overflow + 1 ) lowerCamelCase_ : Optional[Any] =waveform[idx : idx + max_length] lowerCamelCase_ : Union[str, Any] =self._np_extract_fbank_features(snake_case__ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCamelCase_ : Optional[Any] =self._np_extract_fbank_features(snake_case__ , self.mel_filters ) lowerCamelCase_ : int =max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCamelCase_ : str =mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCamelCase_ : int =np.stack([mel, mel, mel, mel] , axis=0 ) lowerCamelCase_ : int =False else: lowerCamelCase_ : Union[str, Any] =self._random_mel_fusion(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase_ : Any =True else: raise NotImplementedError(F"""data_truncating {truncation} not implemented""" ) else: lowerCamelCase_ : Optional[int] =False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCamelCase_ : str =int(max_length / len(snake_case__ ) ) lowerCamelCase_ : Any =np.stack(np.tile(snake_case__ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCamelCase_ : List[str] =int(max_length / len(snake_case__ ) ) lowerCamelCase_ : Optional[Any] =np.stack(np.tile(snake_case__ , snake_case__ ) ) lowerCamelCase_ : Union[str, Any] =np.pad(snake_case__ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 ) if truncation == "fusion": lowerCamelCase_ : Dict =self._np_extract_fbank_features(snake_case__ , self.mel_filters ) lowerCamelCase_ : Optional[Any] =np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: lowerCamelCase_ : Optional[int] =self._np_extract_fbank_features(snake_case__ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[str] , snake_case__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case__ : str = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : List[str] , ): lowerCamelCase_ : str =truncation if truncation is not None else self.truncation lowerCamelCase_ : List[str] =padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) lowerCamelCase_ : int =isinstance(snake_case__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) lowerCamelCase_ : Any =is_batched_numpy or ( isinstance(snake_case__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase_ : Any =[np.asarray(snake_case__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case__ , np.ndarray ): lowerCamelCase_ : Union[str, Any] =np.asarray(snake_case__ , dtype=np.floataa ) elif isinstance(snake_case__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase_ : Tuple =raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase_ : str =[np.asarray(snake_case__ )] # convert to mel spectrogram, truncate and pad if needed. 
lowerCamelCase_ : Any =[ self._get_input_mel(snake_case__ , max_length if max_length else self.nb_max_samples , snake_case__ , snake_case__ ) for waveform in raw_speech ] lowerCamelCase_ : List[Any] =[] lowerCamelCase_ : Optional[Any] =[] for mel, longer in padded_inputs: input_mel.append(snake_case__ ) is_longer.append(snake_case__ ) if truncation == "fusion" and sum(snake_case__ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCamelCase_ : List[str] =np.random.randint(0 , len(snake_case__ ) ) lowerCamelCase_ : List[str] =True if isinstance(input_mel[0] , snake_case__ ): lowerCamelCase_ : Union[str, Any] =[np.asarray(snake_case__ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCamelCase_ : Union[str, Any] =[[longer] for longer in is_longer] lowerCamelCase_ : Optional[int] ={"input_features": input_mel, "is_longer": is_longer} lowerCamelCase_ : List[str] =BatchFeature(snake_case__ ) if return_tensors is not None: lowerCamelCase_ : str =input_features.convert_to_tensors(snake_case__ ) return input_features
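A minimal driving sketch for the feature extractor above. The concrete class name `ClapFeatureExtractor` is an assumption (the dump obfuscates it); the keyword arguments mirror the `__call__` defined above:

import numpy as np
from transformers import ClapFeatureExtractor  # assumed name for the class above

extractor = ClapFeatureExtractor()  # defaults: 48 kHz sampling, 10 s max length, 64 mel bins
audio = np.random.randn(3 * 48_000).astype(np.float32)  # 3 s of mono audio

features = extractor(audio, sampling_rate=48_000, return_tensors="np")
# The default "fusion" truncation stacks four mel views: (batch, 4, frames, 64).
print(features["input_features"].shape)
# With "fusion" and no clip over 10 s, one entry is randomly forced to True (see above).
print(features["is_longer"])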
144
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : int = logging.get_logger(__name__) A__ : List[str] = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[Any] = "pix2struct_text_model" _UpperCAmelCase :str = ["past_key_values"] _UpperCAmelCase :str = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Dict , snake_case__ : Any=5_0244 , snake_case__ : Optional[int]=768 , snake_case__ : Dict=64 , snake_case__ : List[str]=2048 , snake_case__ : Dict=12 , snake_case__ : Any=12 , snake_case__ : Dict=32 , snake_case__ : int=128 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=1E-6 , snake_case__ : Any=1.0 , snake_case__ : int="gelu_new" , snake_case__ : Optional[Any]=0 , snake_case__ : Any=False , snake_case__ : Any=0 , snake_case__ : Any=1 , snake_case__ : Optional[int]=False , snake_case__ : Tuple=True , **snake_case__ : Any , ): lowerCamelCase_ : List[str] =vocab_size lowerCamelCase_ : Tuple =hidden_size lowerCamelCase_ : Optional[int] =d_kv lowerCamelCase_ : List[Any] =d_ff lowerCamelCase_ : Tuple =num_layers lowerCamelCase_ : Optional[int] =num_heads lowerCamelCase_ : Any =relative_attention_num_buckets lowerCamelCase_ : Optional[int] =relative_attention_max_distance lowerCamelCase_ : List[Any] =dropout_rate lowerCamelCase_ : str =layer_norm_epsilon lowerCamelCase_ : int =initializer_factor lowerCamelCase_ : str =use_cache lowerCamelCase_ : int =eos_token_id lowerCamelCase_ : Optional[Any] =decoder_start_token_id # for backwards compatibility lowerCamelCase_ : Optional[Any] =dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , ) @classmethod def UpperCAmelCase__ ( cls : Tuple , snake_case__ : Union[str, os.PathLike] , **snake_case__ : str ): cls._set_token_in_kwargs(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ : Any =cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowerCamelCase_ : List[Any] =config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[Any] = "pix2struct_vision_model" def __init__( self : Optional[int] , snake_case__ : Tuple=768 , snake_case__ : str=768 , snake_case__ : Union[str, Any]=2048 , snake_case__ : Tuple=64 , snake_case__ : List[Any]=12 , snake_case__ : Dict=12 , snake_case__ : int="gelu_new" , snake_case__ : str=1E-6 , snake_case__ : int=0.0 , snake_case__ : int=0.0 , snake_case__ : Dict=1E-10 , snake_case__ : Tuple=1.0 , snake_case__ : int=4096 , snake_case__ : Tuple=32 , snake_case__ : List[str]=128 , **snake_case__ : List[Any] , ): super().__init__(**snake_case__ ) lowerCamelCase_ : int =hidden_size lowerCamelCase_ : List[Any] =patch_embed_hidden_size lowerCamelCase_ : Tuple =d_ff lowerCamelCase_ : List[Any] =dropout_rate lowerCamelCase_ : Dict =num_hidden_layers lowerCamelCase_ : List[str] =num_attention_heads lowerCamelCase_ : Optional[Any] =initializer_range lowerCamelCase_ : int =initializer_factor lowerCamelCase_ : Any =attention_dropout lowerCamelCase_ : List[str] =layer_norm_eps lowerCamelCase_ : int =dense_act_fn lowerCamelCase_ : Optional[Any] =seq_len lowerCamelCase_ : Optional[int] =relative_attention_num_buckets lowerCamelCase_ : Optional[int] =relative_attention_max_distance lowerCamelCase_ : Dict =d_kv @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Dict ): cls._set_token_in_kwargs(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ : Dict =cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowerCamelCase_ : List[Any] =config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class lowercase__ ( snake_case__ ): _UpperCAmelCase :str = "pix2struct" _UpperCAmelCase :List[str] = True def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=1.0 , snake_case__ : List[Any]=0.02 , snake_case__ : List[Any]=False , snake_case__ : int=False , snake_case__ : Any=True , **snake_case__ : List[Any] , ): super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowerCamelCase_ : Dict ={} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: lowerCamelCase_ : int ={} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." 
) lowerCamelCase_ : Any =PixaStructTextConfig(**snake_case__ ) lowerCamelCase_ : Optional[Any] =PixaStructVisionConfig(**snake_case__ ) lowerCamelCase_ : str =self.text_config.decoder_start_token_id lowerCamelCase_ : Optional[int] =self.text_config.pad_token_id lowerCamelCase_ : List[Any] =self.text_config.eos_token_id lowerCamelCase_ : int =initializer_factor lowerCamelCase_ : Optional[Any] =initializer_range lowerCamelCase_ : Any =self.initializer_range lowerCamelCase_ : List[Any] =self.initializer_range lowerCamelCase_ : List[str] =is_vqa @classmethod def UpperCAmelCase__ ( cls : List[str] , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Optional[int] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Tuple =copy.deepcopy(self.__dict__ ) lowerCamelCase_ : Dict =self.text_config.to_dict() lowerCamelCase_ : Union[str, Any] =self.vision_config.to_dict() lowerCamelCase_ : Dict =self.__class__.model_type return output
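A sketch of assembling the composite config via the `from_text_vision_configs`-style classmethod defined above. The standard `transformers` class names are assumed; the dump spells them `PixaStruct*`:

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=4, num_heads=4, hidden_size=256)
vision_config = Pix2StructVisionConfig(num_hidden_layers=4, hidden_size=256)

config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
# to_dict() re-nests both sub-configs, mirroring the last method above.
assert config.to_dict()["text_config"]["num_layers"] == 4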
144
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class lowercase_ ( A ): """simple docstring""" lowerCamelCase_ = '''deformable_detr''' lowerCamelCase_ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : str , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=3_0_0 , __lowerCamelCase : int=1_0_2_4 , __lowerCamelCase : str=6 , __lowerCamelCase : Optional[int]=1_0_2_4 , __lowerCamelCase : List[str]=8 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : List[Any]=1_0_2_4 , __lowerCamelCase : Dict=8 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]="relu" , __lowerCamelCase : Optional[int]=2_5_6 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : List[Any]=0.0_2 , __lowerCamelCase : str=1.0 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Any="sine" , __lowerCamelCase : Tuple="resnet50" , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=4 , __lowerCamelCase : int=4 , __lowerCamelCase : Any=4 , __lowerCamelCase : int=False , __lowerCamelCase : Optional[Any]=3_0_0 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=1 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.2_5 , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : List[str] , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) _SCREAMING_SNAKE_CASE = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): _SCREAMING_SNAKE_CASE = backbone_config.get("model_type" ) _SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] _SCREAMING_SNAKE_CASE = config_class.from_dict(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = use_timm_backbone _SCREAMING_SNAKE_CASE = backbone_config _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = num_queries _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = d_model _SCREAMING_SNAKE_CASE = encoder_ffn_dim _SCREAMING_SNAKE_CASE = encoder_layers _SCREAMING_SNAKE_CASE = encoder_attention_heads _SCREAMING_SNAKE_CASE = decoder_ffn_dim _SCREAMING_SNAKE_CASE = decoder_layers _SCREAMING_SNAKE_CASE = decoder_attention_heads _SCREAMING_SNAKE_CASE = dropout _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = activation_dropout _SCREAMING_SNAKE_CASE = activation_function _SCREAMING_SNAKE_CASE = init_std _SCREAMING_SNAKE_CASE = init_xavier_std _SCREAMING_SNAKE_CASE = encoder_layerdrop _SCREAMING_SNAKE_CASE = auxiliary_loss _SCREAMING_SNAKE_CASE = position_embedding_type _SCREAMING_SNAKE_CASE = backbone _SCREAMING_SNAKE_CASE = use_pretrained_backbone _SCREAMING_SNAKE_CASE = dilation # deformable attributes _SCREAMING_SNAKE_CASE = num_feature_levels _SCREAMING_SNAKE_CASE = encoder_n_points _SCREAMING_SNAKE_CASE = decoder_n_points _SCREAMING_SNAKE_CASE = two_stage _SCREAMING_SNAKE_CASE = two_stage_num_proposals _SCREAMING_SNAKE_CASE = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _SCREAMING_SNAKE_CASE = class_cost _SCREAMING_SNAKE_CASE = bbox_cost _SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients _SCREAMING_SNAKE_CASE = mask_loss_coefficient _SCREAMING_SNAKE_CASE = dice_loss_coefficient _SCREAMING_SNAKE_CASE = bbox_loss_coefficient _SCREAMING_SNAKE_CASE = giou_loss_coefficient _SCREAMING_SNAKE_CASE = eos_coefficient _SCREAMING_SNAKE_CASE = focal_alpha _SCREAMING_SNAKE_CASE = disable_custom_kernels super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def lowerCAmelCase_ ( self : Any ): """simple docstring""" return self.encoder_attention_heads @property def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" return self.d_model def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" _SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() _SCREAMING_SNAKE_CASE = self.__class__.model_type return output
code_codestyle: 111
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowercase_ : """simple docstring""" def lowerCAmelCase_ ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): """simple docstring""" return None class lowercase_ : """simple docstring""" def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ): """simple docstring""" return None class lowercase_ ( unittest.TestCase ): """simple docstring""" lowerCamelCase_ = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "tf" , 1_2 , **__lowerCamelCase ) @require_torch @slow def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "pt" , 1_2 , **__lowerCamelCase ) @require_torch @slow def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" from transformers import BertModel _SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(__lowerCamelCase ) ) vocab_file.flush() _SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: _SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) ) model.save_pretrained(__lowerCamelCase ) self._test_export(__lowerCamelCase , "pt" , 1_2 , __lowerCamelCase ) @require_tf @slow def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 1_2 , **__lowerCamelCase ) _SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 1_2 , **__lowerCamelCase ) _SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : int ): """simple docstring""" try: # Compute path with TemporaryDirectory() as tempdir: _SCREAMING_SNAKE_CASE = Path(__lowerCamelCase 
).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) return path except Exception as e: self.fail(__lowerCamelCase ) @require_torch @require_tokenizers @slow def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" from transformers import BertModel _SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) _SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" ) @require_tf @require_tokenizers @slow def lowerCAmelCase_ ( self : Any ): """simple docstring""" from transformers import TFBertModel _SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) _SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" ) def lowerCAmelCase_ ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase ) # Assert all variables are present self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase ) self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"] _SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__lowerCamelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] 
, tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" _SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
style_context_codestyle: 111
label: 1
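The Deformable DETR config in the row above couples two_stage to with_box_refine. A short sketch of that validation, assuming the public DeformableDetrConfig class name from transformers (the error string is taken verbatim from the block):

from transformers import DeformableDetrConfig

try:
    # two_stage requires box refinement, as the __init__ above enforces.
    DeformableDetrConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # "If two_stage is True, with_box_refine must be True."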
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def __snake_case ( ): """simple docstring""" A_ = torch.nn.Linear(2 ,4 ) A_ = torch.optim.AdamW(model.parameters() ,lr=1.0 ) A_ = torch.optim.lr_scheduler.OneCycleLR(__UpperCamelCase ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 ) A_ = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) A_ = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(__UpperCamelCase ) class _a ( snake_case_ ): """simple docstring""" @require_cuda def __A ( self : Any ): A_ = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(UpperCAmelCase ): A_ = Accelerator(cpu=UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = Accelerator() A_ = GradientState() assert state.num_steps == 1 A_ = 4 assert state.num_steps == 4 assert state.sync_gradients is True A_ = False assert state.sync_gradients is False GradientState._reset_state() def __A ( self : List[str] ): A_ = Accelerator() A_ , A_ , A_ , A_ , A_ = create_components() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = accelerator.prepare(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def __A ( self : Dict ): A_ = Accelerator() A_ , A_ , A_ , A_ , A_ = create_components() accelerator.prepare(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def __A ( self : List[str] ): PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*UpperCAmelCase : Dict , **UpperCAmelCase : List[Any] ): pass with patch("torch.cuda.set_device" , UpperCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ): A_ = Accelerator() self.assertEqual(str(accelerator.state.device ) , "cuda:64" ) def __A ( self : List[Any] ): A_ = Accelerator() A_ , A_ , A_ , A_ , A_ = create_components() accelerator.prepare(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = get_signature(UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCAmelCase ) # make sure random weights don't match 
load_random_weights(UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase ) ) < 1E-3 ) def __A ( self : Tuple ): A_ = Accelerator() A_ , A_ , A_ , A_ , A_ = create_components() accelerator.prepare(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = get_signature(UpperCAmelCase ) # saving hook def save_config(UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] ): A_ = {"class_name": models[0].__class__.__name__} with open(os.path.join(UpperCAmelCase , "data.json" ) , "w" ) as f: json.dump(UpperCAmelCase , UpperCAmelCase ) # loading hook def load_config(UpperCAmelCase : Optional[int] , UpperCAmelCase : str ): with open(os.path.join(UpperCAmelCase , "data.json" ) , "r" ) as f: A_ = json.load(UpperCAmelCase ) A_ = config["class_name"] A_ = accelerator.register_save_state_pre_hook(UpperCAmelCase ) A_ = accelerator.register_load_state_pre_hook(UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCAmelCase ) # make sure random weights don't match with hooks load_random_weights(UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase ) ) > 1E-3 ) # random class name to verify correct one is loaded A_ = "random" # make sure loaded weights match with hooks accelerator.load_state(UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCAmelCase ) # make sure random weights don't match with hooks removed load_random_weights(UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase ) ) > 1E-3 ) # random class name to verify correct one is loaded A_ = "random" # make sure loaded weights match with hooks removed accelerator.load_state(UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def __A ( self : Dict ): A_ = Accelerator() A_ , A_ , A_ , A_ , A_ = create_components() A_ = None # This should work A_ , A_ , A_ , A_ , A_ , A_ = accelerator.prepare( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) self.assertTrue(dummy_obj is None ) def __A ( self : Union[str, Any] ): A_ = Accelerator() A_ , A_ , A_ , A_ , A_ = create_components() A_ = [1, 2, 3] # This should work A_ , A_ , A_ , A_ , A_ , A_ = accelerator.prepare( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) self.assertEqual( getattr(UpperCAmelCase , "_is_accelerate_prepared" , UpperCAmelCase ) , UpperCAmelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , ) self.assertEqual( getattr(UpperCAmelCase , "_is_accelerate_prepared" , UpperCAmelCase ) , UpperCAmelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCAmelCase , "_is_accelerate_prepared" , UpperCAmelCase ) , UpperCAmelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( 
getattr(UpperCAmelCase , "_is_accelerate_prepared" , UpperCAmelCase ) , UpperCAmelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCAmelCase , "_is_accelerate_prepared" , UpperCAmelCase ) , UpperCAmelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCAmelCase , "_is_accelerate_prepared" , UpperCAmelCase ) , UpperCAmelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) @slow @require_bnb def __A ( self : List[Any] ): from transformers import AutoModelForCausalLM A_ = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCAmelCase , device_map={"": 0} , ) A_ = Accelerator() # This should work A_ = accelerator.prepare(UpperCAmelCase ) @slow @require_bnb def __A ( self : List[str] ): from transformers import AutoModelForCausalLM A_ = Accelerator() with init_empty_weights(): A_ = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() A_ = infer_auto_device_map(UpperCAmelCase ) A_ = "cpu" A_ = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , device_map=UpperCAmelCase , load_in_abit=UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=UpperCAmelCase ) # This should not work and get value error with self.assertRaises(UpperCAmelCase ): A_ = accelerator.prepare(UpperCAmelCase ) @slow @require_bnb @require_multi_gpu def __A ( self : Any ): from transformers import AutoModelForCausalLM A_ = {"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): A_ = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() A_ = infer_auto_device_map(UpperCAmelCase ) A_ = 1 A_ = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCAmelCase , device_map=UpperCAmelCase , ) A_ = Accelerator() # This should not work and get value error with self.assertRaises(UpperCAmelCase ): A_ = accelerator.prepare(UpperCAmelCase ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def __A ( self : Optional[Any] ): from transformers import AutoModelForCausalLM with init_empty_weights(): A_ = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) A_ = infer_auto_device_map(UpperCAmelCase ) A_ = 1 A_ = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCAmelCase , device_map=UpperCAmelCase , ) A_ = Accelerator() # This should work A_ = accelerator.prepare(UpperCAmelCase ) @require_cuda def __A ( self : List[Any] ): A_ = torch.nn.Linear(10 , 10 ) A_ = torch.optim.SGD(model.parameters() , lr=0.01 ) A_ = Accelerator(cpu=UpperCAmelCase ) A_ = accelerator.prepare(UpperCAmelCase )
code_codestyle: 312
__a :Dict = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
style_context_codestyle: 312
label: 1
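A hedged sketch of the checkpoint round trip the Accelerator tests above exercise, using accelerate's public save_state / load_state API; the tiny linear model and the CPU-only setup are assumptions chosen for illustration, not the test's exact fixtures.

import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = accelerator.prepare(torch.nn.Linear(2, 4))

with tempfile.TemporaryDirectory() as tmpdir:
    accelerator.save_state(tmpdir)       # checkpoint model/optimizer/RNG state
    torch.nn.init.normal_(model.weight)  # scramble the weights ...
    accelerator.load_state(tmpdir)       # ... then restore them from disk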
"""simple docstring""" import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __get__(self , _lowerCamelCase , _lowerCamelCase=None ): """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError("""unreadable attribute""" ) UpperCAmelCase__ : Tuple = """__cached_""" + self.fget.__name__ UpperCAmelCase__ : Dict = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if cached is None: UpperCAmelCase__ : Optional[Any] = self.fget(_lowerCamelCase ) setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return cached def a__ ( lowerCAmelCase ) -> Any: UpperCAmelCase__ : Optional[int] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def a__ ( lowerCAmelCase ) -> Dict: if is_torch_fx_proxy(lowerCAmelCase ): return True if is_torch_available(): import torch if isinstance(lowerCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(lowerCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(lowerCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(lowerCAmelCase , np.ndarray ) def a__ ( lowerCAmelCase ) -> Optional[Any]: return isinstance(lowerCAmelCase , np.ndarray ) def a__ ( lowerCAmelCase ) -> str: return _is_numpy(lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> Any: import torch return isinstance(lowerCAmelCase , torch.Tensor ) def a__ ( lowerCAmelCase ) -> Union[str, Any]: return False if not is_torch_available() else _is_torch(lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> Dict: import torch return isinstance(lowerCAmelCase , torch.device ) def a__ ( lowerCAmelCase ) -> List[Any]: return False if not is_torch_available() else _is_torch_device(lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> Tuple: import torch if isinstance(lowerCAmelCase , lowerCAmelCase ): if hasattr(lowerCAmelCase , lowerCAmelCase ): UpperCAmelCase__ : Optional[Any] = getattr(lowerCAmelCase , lowerCAmelCase ) else: return False return isinstance(lowerCAmelCase , torch.dtype ) def a__ ( lowerCAmelCase ) -> Optional[int]: return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> Optional[Any]: import tensorflow as tf return isinstance(lowerCAmelCase , tf.Tensor ) def a__ ( lowerCAmelCase ) -> Dict: return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> List[str]: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(lowerCAmelCase , """is_symbolic_tensor""" ): return tf.is_symbolic_tensor(lowerCAmelCase ) return type(lowerCAmelCase ) == tf.Tensor def a__ ( lowerCAmelCase ) -> Dict: return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> List[str]: import jax.numpy as jnp # noqa: F811 return isinstance(lowerCAmelCase , jnp.ndarray ) def a__ ( lowerCAmelCase ) 
-> str: return False if not is_flax_available() else _is_jax(lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> Tuple: if isinstance(lowerCAmelCase , (dict, UserDict) ): return {k: to_py_obj(lowerCAmelCase ) for k, v in obj.items()} elif isinstance(lowerCAmelCase , (list, tuple) ): return [to_py_obj(lowerCAmelCase ) for o in obj] elif is_tf_tensor(lowerCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(lowerCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(lowerCAmelCase ): return np.asarray(lowerCAmelCase ).tolist() elif isinstance(lowerCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( lowerCAmelCase ) -> Optional[int]: if isinstance(lowerCAmelCase , (dict, UserDict) ): return {k: to_numpy(lowerCAmelCase ) for k, v in obj.items()} elif isinstance(lowerCAmelCase , (list, tuple) ): return np.array(lowerCAmelCase ) elif is_tf_tensor(lowerCAmelCase ): return obj.numpy() elif is_torch_tensor(lowerCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(lowerCAmelCase ): return np.asarray(lowerCAmelCase ) else: return obj class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def _a (self ): """simple docstring""" UpperCAmelCase__ : List[Any] = fields(self ) # Safety and consistency checks if not len(_lowerCamelCase ): raise ValueError(F"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" ) UpperCAmelCase__ : List[Any] = getattr(self , class_fields[0].name ) UpperCAmelCase__ : Optional[int] = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(_lowerCamelCase ): if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : Optional[Any] = first_field.items() UpperCAmelCase__ : Optional[int] = True else: try: UpperCAmelCase__ : List[Any] = iter(_lowerCamelCase ) UpperCAmelCase__ : List[str] = True except TypeError: UpperCAmelCase__ : Dict = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_lowerCamelCase ): if ( not isinstance(_lowerCamelCase , (list, tuple) ) or not len(_lowerCamelCase ) == 2 or not isinstance(element[0] , _lowerCamelCase ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase__ : List[str] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCAmelCase__ : str = element[1] elif first_field is not None: UpperCAmelCase__ : str = first_field else: for field in class_fields: UpperCAmelCase__ : Tuple = getattr(self , field.name ) if v is not None: UpperCAmelCase__ : Optional[int] = v def __delitem__(self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def _a (self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def _a (self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def _a (self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__(self , _lowerCamelCase ): """simple docstring""" if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : List[str] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_lowerCamelCase , _lowerCamelCase ) super().__setattr__(_lowerCamelCase , _lowerCamelCase ) def __setitem__(self , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" super().__setitem__(_lowerCamelCase , _lowerCamelCase ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_lowerCamelCase , _lowerCamelCase ) def _a (self ): """simple docstring""" return tuple(self[k] for k in self.keys() ) class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' @classmethod def _a (cls , _lowerCamelCase ): """simple docstring""" raise ValueError( F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'longest' SCREAMING_SNAKE_CASE = 'max_length' SCREAMING_SNAKE_CASE = 'do_not_pad' class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'pt' SCREAMING_SNAKE_CASE = 'tf' SCREAMING_SNAKE_CASE = 'np' SCREAMING_SNAKE_CASE = 'jax' class lowerCamelCase : '''simple docstring''' def __init__(self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = context_managers UpperCAmelCase__ : List[Any] = ExitStack() def __enter__(self ): """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(_lowerCamelCase ) def __exit__(self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" self.stack.__exit__(*_lowerCamelCase , **_lowerCamelCase ) def a__ ( lowerCAmelCase ) -> List[Any]: UpperCAmelCase__ : Optional[int] = infer_framework(lowerCAmelCase ) if framework == "tf": UpperCAmelCase__ : str = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : Dict = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ 
( lowerCAmelCase ) -> Any: UpperCAmelCase__ : int = model_class.__name__ UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase ) if framework == "tf": UpperCAmelCase__ : Optional[int] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : int = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( lowerCAmelCase , lowerCAmelCase = "" , lowerCAmelCase = "." ) -> Optional[Any]: def _flatten_dict(lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase="." ): for k, v in d.items(): UpperCAmelCase__ : List[str] = str(lowerCAmelCase ) + delimiter + str(lowerCAmelCase ) if parent_key else k if v and isinstance(lowerCAmelCase , lowerCAmelCase ): yield from flatten_dict(lowerCAmelCase , lowerCAmelCase , delimiter=lowerCAmelCase ).items() else: yield key, v return dict(_flatten_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) ) @contextmanager def a__ ( lowerCAmelCase , lowerCAmelCase = False ) -> Any: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( lowerCAmelCase , lowerCAmelCase=None ) -> Tuple: if is_numpy_array(lowerCAmelCase ): return np.transpose(lowerCAmelCase , axes=lowerCAmelCase ) elif is_torch_tensor(lowerCAmelCase ): return array.T if axes is None else array.permute(*lowerCAmelCase ) elif is_tf_tensor(lowerCAmelCase ): import tensorflow as tf return tf.transpose(lowerCAmelCase , perm=lowerCAmelCase ) elif is_jax_tensor(lowerCAmelCase ): return jnp.transpose(lowerCAmelCase , axes=lowerCAmelCase ) else: raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase )}.""" ) def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]: if is_numpy_array(lowerCAmelCase ): return np.reshape(lowerCAmelCase , lowerCAmelCase ) elif is_torch_tensor(lowerCAmelCase ): return array.reshape(*lowerCAmelCase ) elif is_tf_tensor(lowerCAmelCase ): import tensorflow as tf return tf.reshape(lowerCAmelCase , lowerCAmelCase ) elif is_jax_tensor(lowerCAmelCase ): return jnp.reshape(lowerCAmelCase , lowerCAmelCase ) else: raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase )}.""" ) def a__ ( lowerCAmelCase , lowerCAmelCase=None ) -> List[Any]: if is_numpy_array(lowerCAmelCase ): return np.squeeze(lowerCAmelCase , axis=lowerCAmelCase ) elif is_torch_tensor(lowerCAmelCase ): return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase ) elif is_tf_tensor(lowerCAmelCase ): import tensorflow as tf return tf.squeeze(lowerCAmelCase , axis=lowerCAmelCase ) elif is_jax_tensor(lowerCAmelCase ): return jnp.squeeze(lowerCAmelCase , axis=lowerCAmelCase ) else: raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase )}.""" ) def a__ ( lowerCAmelCase , lowerCAmelCase ) -> List[str]: if is_numpy_array(lowerCAmelCase ): return np.expand_dims(lowerCAmelCase , lowerCAmelCase ) elif is_torch_tensor(lowerCAmelCase ): return array.unsqueeze(dim=lowerCAmelCase ) elif is_tf_tensor(lowerCAmelCase ): import tensorflow as tf return tf.expand_dims(lowerCAmelCase , axis=lowerCAmelCase ) elif is_jax_tensor(lowerCAmelCase ): return jnp.expand_dims(lowerCAmelCase , axis=lowerCAmelCase ) else: raise ValueError(F"""Type not supported for 
expand_dims: {type(lowerCAmelCase )}.""" ) def a__ ( lowerCAmelCase ) -> Union[str, Any]: if is_numpy_array(lowerCAmelCase ): return np.size(lowerCAmelCase ) elif is_torch_tensor(lowerCAmelCase ): return array.numel() elif is_tf_tensor(lowerCAmelCase ): import tensorflow as tf return tf.size(lowerCAmelCase ) elif is_jax_tensor(lowerCAmelCase ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase )}.""" ) def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Dict: for key, value in auto_map.items(): if isinstance(lowerCAmelCase , (tuple, list) ): UpperCAmelCase__ : Optional[Any] = [F"""{repo_id}--{v}""" if (v is not None and """--""" not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase__ : List[Any] = F"""{repo_id}--{value}""" return auto_map def a__ ( lowerCAmelCase ) -> List[Any]: for base_class in inspect.getmro(lowerCAmelCase ): UpperCAmelCase__ : List[Any] = base_class.__module__ UpperCAmelCase__ : Union[str, Any] = base_class.__name__ if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("""torch""" ) or name == "PreTrainedModel": return "pt" elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
code_codestyle: 166
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _A = { """configuration_layoutlmv3""": [ """LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv3Config""", """LayoutLMv3OnnxConfig""", ], """processing_layoutlmv3""": ["""LayoutLMv3Processor"""], """tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ["""LayoutLMv3TokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv3ForQuestionAnswering""", """LayoutLMv3ForSequenceClassification""", """LayoutLMv3ForTokenClassification""", """LayoutLMv3Model""", """LayoutLMv3PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLayoutLMv3ForQuestionAnswering""", """TFLayoutLMv3ForSequenceClassification""", """TFLayoutLMv3ForTokenClassification""", """TFLayoutLMv3Model""", """TFLayoutLMv3PreTrainedModel""", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ["""LayoutLMv3FeatureExtractor"""] _A = ["""LayoutLMv3ImageProcessor"""] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 166
label: 1
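The tensor helpers in the row above (transpose, reshape, squeeze, expand_dims) all share one dispatch-by-framework shape. A numpy-only sketch of that shape follows; keeping just the numpy branch and the error path is a simplification, with the torch/tf/jax branches elided.

import numpy as np

def transpose(array, axes=None):
    # The full helper also branches on torch, tensorflow and jax tensors;
    # this sketch keeps only the numpy branch and the error path.
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    raise ValueError(f"Type not supported for transpose: {type(array)}.")

print(transpose(np.ones((2, 3))).shape)  # (3, 2)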
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
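# A minimal, self-contained sketch of the recursive `named_children` +
# `setattr` replacement walk that `_replace_with_bnb_layers` above performs.
# `FakeQuantLinear` is a hypothetical stand-in for a quantized layer such as
# bnb.nn.Linear8bitLt, so the pattern can be run without bitsandbytes or a GPU.
import torch.nn as nn


class FakeQuantLinear(nn.Linear):
    """Placeholder for a quantized linear layer (illustration only)."""


def replace_linear_layers(model: nn.Module, modules_to_not_convert=(), prefix: str = "") -> bool:
    replaced = False
    for name, module in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        skip = any(key == full_name or key + "." in full_name for key in modules_to_not_convert)
        if isinstance(module, nn.Linear) and not skip:
            new_module = FakeQuantLinear(module.in_features, module.out_features, bias=module.bias is not None)
            new_module.weight.data = module.weight.data
            if module.bias is not None:
                new_module.bias.data = module.bias.data
            new_module.requires_grad_(False)
            setattr(model, name, new_module)  # splice the replacement into the parent
            replaced = True
        else:
            # recurse into non-leaf children, exactly as the walk above does
            replaced = replace_linear_layers(module, modules_to_not_convert, full_name) or replaced
    return replaced


if __name__ == "__main__":
    net = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
    assert replace_linear_layers(net)
    assert all(isinstance(m, FakeQuantLinear) for m in net if isinstance(m, nn.Linear))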
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __snake_case : Any = 42 __snake_case : Tuple = 42 class UpperCamelCase__ ( nn.Module ): '''simple docstring''' __snake_case : Optional[int] = 42 __snake_case : Optional[int] = (16, 32, 96, 256) __snake_case : List[str] = jnp.floataa def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = nn.Conv( self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) SCREAMING_SNAKE_CASE = [] for i in range(len(self.block_out_channels ) - 1 ): SCREAMING_SNAKE_CASE = self.block_out_channels[i] SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1] SCREAMING_SNAKE_CASE = nn.Conv( __a ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(__a ) SCREAMING_SNAKE_CASE = nn.Conv( __a ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(__a ) SCREAMING_SNAKE_CASE = blocks SCREAMING_SNAKE_CASE = nn.Conv( self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : Any ,lowerCamelCase__ : int ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.conv_in(__a ) SCREAMING_SNAKE_CASE = nn.silu(__a ) for block in self.blocks: SCREAMING_SNAKE_CASE = block(__a ) SCREAMING_SNAKE_CASE = nn.silu(__a ) SCREAMING_SNAKE_CASE = self.conv_out(__a ) return embedding @flax_register_to_config class UpperCamelCase__ ( nn.Module , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __snake_case : List[Any] = 32 __snake_case : Dict = 4 __snake_case : Optional[Any] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __snake_case : List[str] = False __snake_case : Union[str, Any] = (320, 640, 1280, 1280) __snake_case : Union[str, Any] = 2 __snake_case : Optional[int] = 8 __snake_case : str = None __snake_case : Any = 1280 __snake_case : Union[str, Any] = 0.0 __snake_case : List[str] = False __snake_case : List[Any] = jnp.floataa __snake_case : Optional[Any] = True __snake_case : Any = 0 __snake_case : int = "rgb" __snake_case : str = (16, 32, 96, 256) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE = jnp.zeros(__a ,dtype=jnp.floataa ) SCREAMING_SNAKE_CASE = jnp.ones((1,) ,dtype=jnp.intaa ) SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8) SCREAMING_SNAKE_CASE = jnp.zeros(__a ,dtype=jnp.floataa ) SCREAMING_SNAKE_CASE = jax.random.split(__a ) SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng} return self.init(__a ,__a ,__a ,__a ,__a )["params"] def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> 
str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.block_out_channels SCREAMING_SNAKE_CASE = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time SCREAMING_SNAKE_CASE = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(__a ,dtype=self.dtype ) SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,) SCREAMING_SNAKE_CASE = self.only_cross_attention if isinstance(__a ,__a ): SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types ) if isinstance(__a ,__a ): SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types ) # down SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = block_out_channels[0] SCREAMING_SNAKE_CASE = nn.Conv( __a ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(__a ) for i, down_block_type in enumerate(self.down_block_types ): SCREAMING_SNAKE_CASE = output_channel SCREAMING_SNAKE_CASE = block_out_channels[i] SCREAMING_SNAKE_CASE = i == len(__a ) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD( in_channels=__a ,out_channels=__a ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,) else: SCREAMING_SNAKE_CASE = FlaxDownBlockaD( in_channels=__a ,out_channels=__a ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(__a ) for _ in range(self.layers_per_block ): SCREAMING_SNAKE_CASE = nn.Conv( __a ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(__a ) if not is_final_block: SCREAMING_SNAKE_CASE = nn.Conv( __a ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(__a ) SCREAMING_SNAKE_CASE = down_blocks SCREAMING_SNAKE_CASE = controlnet_down_blocks # mid SCREAMING_SNAKE_CASE = block_out_channels[-1] SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn( in_channels=__a ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,) SCREAMING_SNAKE_CASE = nn.Conv( __a 
,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : str ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : float = 1.0 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = False ,) -> Union[FlaxControlNetOutput, Tuple]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order if channel_order == "bgr": SCREAMING_SNAKE_CASE = jnp.flip(__a ,axis=1 ) # 1. time if not isinstance(__a ,jnp.ndarray ): SCREAMING_SNAKE_CASE = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(__a ,jnp.ndarray ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa ) SCREAMING_SNAKE_CASE = jnp.expand_dims(__a ,0 ) SCREAMING_SNAKE_CASE = self.time_proj(__a ) SCREAMING_SNAKE_CASE = self.time_embedding(__a ) # 2. pre-process SCREAMING_SNAKE_CASE = jnp.transpose(__a ,(0, 2, 3, 1) ) SCREAMING_SNAKE_CASE = self.conv_in(__a ) SCREAMING_SNAKE_CASE = jnp.transpose(__a ,(0, 2, 3, 1) ) SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(__a ) sample += controlnet_cond # 3. down SCREAMING_SNAKE_CASE = (sample,) for down_block in self.down_blocks: if isinstance(__a ,__a ): SCREAMING_SNAKE_CASE = down_block(__a ,__a ,__a ,deterministic=not train ) else: SCREAMING_SNAKE_CASE = down_block(__a ,__a ,deterministic=not train ) down_block_res_samples += res_samples # 4. mid SCREAMING_SNAKE_CASE = self.mid_block(__a ,__a ,__a ,deterministic=not train ) # 5. contronet blocks SCREAMING_SNAKE_CASE = () for down_block_res_sample, controlnet_block in zip(__a ,self.controlnet_down_blocks ): SCREAMING_SNAKE_CASE = controlnet_block(__a ) controlnet_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples SCREAMING_SNAKE_CASE = self.controlnet_mid_block(__a ) # 6. scaling SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=__a ,mid_block_res_sample=__a )
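# A minimal, self-contained sketch (in the same Flax style) of the
# zero-initialized 1x1 convolution trick used by the controlnet_down_blocks
# above: because kernel and bias start at zero, the ControlNet branch adds
# nothing to the frozen UNet at step 0 and grows its influence during
# training. `ZeroConv` is a hypothetical name used for illustration.
import jax
import jax.numpy as jnp
import flax.linen as nn


class ZeroConv(nn.Module):
    features: int

    @nn.compact
    def __call__(self, x):
        return nn.Conv(
            self.features,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
        )(x)


x = jnp.ones((1, 8, 8, 4))  # NHWC, as in the blocks above
params = ZeroConv(features=4).init(jax.random.PRNGKey(0), x)
print(float(jnp.abs(ZeroConv(features=4).apply(params, x)).max()))  # 0.0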
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any]=3 ,lowerCamelCase__ : List[str]=32 ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : str=10 ,lowerCamelCase__ : Any=[10, 20, 30, 40] ,lowerCamelCase__ : Optional[Any]=[1, 1, 2, 1] ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Tuple="relu" ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : Optional[int]=None ,) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embeddings_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxRegNetModel(config=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification(config=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __snake_case : Tuple = False 
__snake_case : int = False __snake_case : Tuple = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxRegNetModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]: '''simple docstring''' return def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' def check_hidden_states_output(lowerCamelCase__ : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int] ): SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase__ ) ,expected_num_stages + 1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE = 
self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) @jax.jit def model_jitted(lowerCamelCase__ : Dict ,**lowerCamelCase__ : Optional[Any] ): return model(pixel_values=lowerCamelCase__ ,**lowerCamelCase__ ) with self.subTest("""JIT Enabled""" ): SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ) for jitted_output, output in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assertEqual(jitted_output.shape ,output.shape ) def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase__ ,return_tensors="""np""" ) SCREAMING_SNAKE_CASE = model(**lowerCamelCase__ ) # verify the logits SCREAMING_SNAKE_CASE = (1, 1000) self.assertEqual(outputs.logits.shape ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
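# A distilled version of the JIT-equivalence check in the test above: run the
# same function with tracing enabled and disabled and require identical shapes
# and values, which catches bugs that only appear under `jax.jit`.
import jax
import jax.numpy as jnp


def fn(x):
    return jnp.tanh(x) * 2.0


jitted = jax.jit(fn)
x = jnp.ones((2, 3))
with jax.disable_jit():
    eager_out = jitted(x)  # runs eagerly despite the jit wrapper
jit_out = jitted(x)
assert eager_out.shape == jit_out.shape
assert jnp.allclose(eager_out, jit_out)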
"""A process-rank-aware wrapper around `tqdm` for distributed runs."""
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Forward to `tqdm.auto.tqdm`, but disable the bar on every process
    except the local main process when `main_process_only` is True."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
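# A standalone re-implementation of the same pattern, assuming torchrun-style
# launchers that export LOCAL_RANK; it avoids the accelerate dependency and
# exists only to illustrate the wrapper above. `rank_aware_tqdm` is a
# hypothetical name.
import os
from tqdm.auto import tqdm as _tqdm


def rank_aware_tqdm(main_process_only: bool = True, *args, **kwargs):
    disable = False
    if main_process_only:
        # silence every process except local rank 0
        disable = int(os.environ.get("LOCAL_RANK", 0)) != 0
    return _tqdm(*args, **kwargs, disable=disable)


for _ in rank_aware_tqdm(True, range(10)):
    pass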
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a__( unittest.TestCase ): '''simple docstring''' @property def a_ ( self): """simple docstring""" torch.manual_seed(0) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def a_ ( self): """simple docstring""" lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch class a__( unittest.TestCase ): '''simple docstring''' def a_ ( self): """simple docstring""" lowerCAmelCase = """google/ddpm-cifar10-32""" lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase) lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
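# The test above leans on a determinism contract: re-seeding with
# `torch.manual_seed(0)` must reproduce bit-identical samples, which is what
# lets a hard-coded 3x3 corner slice of the generated image stay stable.
# The same contract in isolation:
import torch

generator = torch.manual_seed(0)
a = torch.randn(4, generator=generator)
generator = torch.manual_seed(0)
b = torch.randn(4, generator=generator)
assert torch.equal(a, b)  # identical seeds give identical samples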
import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def __A ( _lowercase ): '''simple docstring''' if "cls_token" in name: _A = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' ) if "mask_token" in name: _A = name.replace('''mask_token''' , '''decoder.mask_token''' ) if "decoder_pos_embed" in name: _A = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: _A = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: _A = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: _A = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' ) if "decoder_blocks" in name: _A = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: _A = name.replace('''blocks''' , '''vit.encoder.layer''' ) if "attn.proj" in name: _A = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: _A = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: _A = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: _A = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: _A = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: _A = name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: _A = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: _A = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: _A = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name: _A = name.replace('''norm.weight''' , '''vit.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name: _A = name.replace('''norm.bias''' , '''vit.layernorm.bias''' ) return name def __A ( _lowercase , _lowercase ): '''simple docstring''' for key in orig_state_dict.copy().keys(): _A = orig_state_dict.pop(_lowercase ) if "qkv" in key: _A = key.split('''.''' ) _A = int(key_split[1] ) if "decoder_blocks" in key: _A = config.decoder_hidden_size _A = '''decoder.decoder_layers.''' if "weight" in key: _A = val[:dim, :] _A = val[dim : dim * 2, :] _A = val[-dim:, :] elif "bias" in key: _A = val[:dim] _A = val[dim : dim * 2] _A = val[-dim:] else: _A = config.hidden_size _A = '''vit.encoder.layer.''' if "weight" in key: _A = val[:dim, :] _A = val[dim : dim * 2, :] _A = val[-dim:, :] elif "bias" in key: _A = val[:dim] _A = val[dim : dim * 2] _A = val[-dim:] else: _A = val return orig_state_dict def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = ViTMAEConfig() if "large" in checkpoint_url: _A = 10_24 _A = 40_96 _A = 24 _A = 16 elif "huge" in checkpoint_url: _A = 14 _A = 12_80 _A = 51_20 _A = 32 _A = 16 _A = ViTMAEForPreTraining(_lowercase ) _A = torch.hub.load_state_dict_from_url(_lowercase , map_location='''cpu''' )['''model'''] _A = ViTMAEImageProcessor(size=config.image_size ) _A = convert_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase ) model.eval() _A = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg''' _A = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) _A = ViTMAEImageProcessor(size=config.image_size ) _A = image_processor(images=_lowercase , return_tensors='''pt''' ) 
# forward pass torch.manual_seed(2 ) _A = model(**_lowercase ) _A = outputs.logits if "large" in checkpoint_url: _A = torch.tensor( [[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] ) elif "huge" in checkpoint_url: _A = torch.tensor( [[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] ) else: _A = torch.tensor( [[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowercase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __A = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
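# The core move in `convert_state_dict` above, shown in isolation: timm packs
# query/key/value into one (3 * dim, dim) projection, and HuggingFace expects
# three separate (dim, dim) blocks, so the fused matrix is sliced row-wise.
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)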
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# silence TensorFlow's C++ logging before the `import tensorflow` below
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
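# A toy, self-contained version of the `_LazyModule` idea the file above
# relies on: attribute access triggers the real import only on first use. The
# real implementation also handles __dir__, pickling and module specs;
# `LazyModule` here is a simplified stand-in using absolute imports.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module: [names]} into {name: module} for O(1) lookups
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}), lazy.sqrt(2.0))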
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 A_ : List[str] = sys.version_info >= (3, 10) def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str: '''simple docstring''' return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE ) @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = 42 a__ = 42 a__ = 42 @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = field(default="toto" , metadata={"help": "help message"} ) @dataclass class A_ : '''simple docstring''' a__ = False a__ = True a__ = None class A_ ( _a ): '''simple docstring''' a__ = "titi" a__ = "toto" class A_ ( _a ): '''simple docstring''' a__ = "titi" a__ = "toto" a__ = 42 @dataclass class A_ : '''simple docstring''' a__ = "toto" def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = BasicEnum(self.foo ) @dataclass class A_ : '''simple docstring''' a__ = "toto" def lowerCAmelCase_ (self ) -> Dict: __UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class A_ : '''simple docstring''' a__ = None a__ = field(default=_a , metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) @dataclass class A_ : '''simple docstring''' a__ = list_field(default=[] ) a__ = list_field(default=[1, 2, 3] ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) a__ = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A_ : '''simple docstring''' a__ = field() a__ = field() a__ = field() def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class A_ : '''simple docstring''' a__ = 42 a__ = field() a__ = None a__ = field(default="toto" , metadata={"help": "help message"} ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class A_ : '''simple docstring''' a__ = False a__ = True a__ = None @dataclass class A_ : '''simple docstring''' a__ = None a__ = field(default=_a , metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) class A_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ (self , lowercase__ , lowercase__ ) -> Optional[int]: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): __UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''} __UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , lowercase__ ) and yy.get('''choices''' , lowercase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](lowercase__ ) , yy['''type'''](lowercase__ ) ) del xx["type"], yy["type"] self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ ) 
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ ) self.assertFalse(example.flag ) def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=lowercase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' ) self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' ) expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ ) __UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) def lowerCAmelCase_ (self ) -> Dict: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) 
self.assertEqual(args.foo , 42 ) __UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase_ (self ) -> str: @dataclass class A_ : '''simple docstring''' a__ = "toto" __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase_ (self ) -> str: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual( lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) __UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase_ (self ) -> List[str]: __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ ) expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' ) expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ ) __UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) ) __UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ ) expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , ) 
self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , ) expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Optional[int]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } __UpperCAmelCase = parser.parse_dict(lowercase__ )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ ) def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' ) os.mkdir(lowercase__ ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> List[Any]: __UpperCAmelCase = HfArgumentParser(lowercase__ ) __UpperCAmelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' ) os.mkdir(lowercase__ ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(lowercase__ , lowercase__ ) __UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] __UpperCAmelCase = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: __UpperCAmelCase = HfArgumentParser(lowercase__ ) self.assertIsNotNone(lowercase__ )
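# End-to-end use of the parser exercised by the tests above (assumes
# `transformers` is installed; `TrainConfig` is a hypothetical dataclass):
# field annotations become argparse types and field defaults become CLI
# defaults.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainConfig:
    lr: float = 3e-4
    epochs: int = 1
    run_name: str = field(default="demo", metadata={"help": "help message"})


parser = HfArgumentParser(TrainConfig)
(cfg,) = parser.parse_args_into_dataclasses(["--lr", "0.01", "--epochs", "3"])
assert cfg.lr == 0.01 and cfg.epochs == 3 and cfg.run_name == "demo"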
"""simple docstring""" UpperCAmelCase__ = {"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []} UpperCAmelCase__ = ["""a""", """b""", """c""", """d""", """e"""] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = start # add current to visited visited.append(lowercase__ ) _UpperCAmelCase = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: _UpperCAmelCase = topological_sort(lowercase__ ,lowercase__ ,lowercase__ ) # if all neighbors visited add current to sort sort.append(lowercase__ ) # if all vertices haven't been visited select a new one to visit if len(lowercase__ ) != len(lowercase__ ): for vertice in vertices: if vertice not in visited: _UpperCAmelCase = topological_sort(lowercase__ ,lowercase__ ,lowercase__ ) # return sort return sort if __name__ == "__main__": UpperCAmelCase__ = topological_sort("""a""", [], []) print(sort)
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = [] # fmt: off # stem: rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") ) rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") ) rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") ) # backbone rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) # fmt: on return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = """""" else: _UpperCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowercase ): """simple docstring""" 
_UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = BitConfig( global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,) _UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 ) _UpperCAmelCase = False # load original model from timm _UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _UpperCAmelCase = create_rename_keys(lowercase ,lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCAmelCase = ViTHybridModel(lowercase ).eval() else: _UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } _UpperCAmelCase = ViTHybridImageProcessor( do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(lowercase ).unsqueeze(0 ) _UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase ,lowercase ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(lowercase ) _UpperCAmelCase = outputs.logits print("""Predicted class:""" ,logits.argmax(-1 ).item() ) if base_model: _UpperCAmelCase = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 ) else: _UpperCAmelCase = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model {vit_name} to 
{pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase ) if push_to_hub: print(f'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(f'''ybelkada/{vit_name}''' ) processor.push_to_hub(f'''ybelkada/{vit_name}''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) UpperCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
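A hedged example of driving the converter above from the command line: the flags are the ones registered in the argparse block of this row, while the script filename and flag values are my own assumptions.

import subprocess

# Illustrative invocation; flags match the argparse definitions above,
# the filename is assumed, not taken from this row.
subprocess.run(
    [
        "python",
        "convert_vit_hybrid_timm_to_pytorch.py",
        "--vit_name", "vit_base_r50_s16_384",
        "--pytorch_dump_folder_path", "./vit_hybrid",
    ],
    check=True,
)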
30
0
from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
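A quick numeric check of the formula above (my own example, not part of the row): a 90-degree arc of a radius-10 circle is a quarter of the full circumference.

import math

# quarter of the circumference of a radius-10 circle: 2 * pi * 10 / 4 = 5 * pi
assert math.isclose(arc_length(90, 10), math.pi * 5)  # ~= 15.708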
245
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Dict = logging.get_logger(__name__) UpperCAmelCase__ : str = { """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""", } class a__ ( UpperCAmelCase ): """simple docstring""" UpperCAmelCase__ : List[Any] ="""nllb-moe""" UpperCAmelCase__ : Any =["""past_key_values"""] UpperCAmelCase__ : Dict ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=1_2_8_1_1_2 , UpperCAmelCase__ : Tuple=1_0_2_4 , UpperCAmelCase__ : str=1_2 , UpperCAmelCase__ : int=4_0_9_6 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : Union[str, Any]=1_2 , UpperCAmelCase__ : int=4_0_9_6 , UpperCAmelCase__ : Optional[Any]=1_6 , UpperCAmelCase__ : Union[str, Any]=0.05 , UpperCAmelCase__ : Any=0.05 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]="relu" , UpperCAmelCase__ : Dict=1_0_2_4 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="float32" , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Union[str, Any]=1_2_8 , UpperCAmelCase__ : Any=6_4 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Optional[Any]=0.0_01 , UpperCAmelCase__ : Optional[Any]=0.0_01 , UpperCAmelCase__ : Dict="all" , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[str]=1.0 , UpperCAmelCase__ : Optional[int]=0.2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Tuple=False , **UpperCAmelCase__ : Union[str, Any] , ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : str = d_model SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers SCREAMING_SNAKE_CASE : str = encoder_attention_heads SCREAMING_SNAKE_CASE : List[str] = decoder_ffn_dim SCREAMING_SNAKE_CASE : str = decoder_layers SCREAMING_SNAKE_CASE : Optional[Any] = decoder_attention_heads SCREAMING_SNAKE_CASE : Optional[Any] = dropout SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE : str = activation_dropout SCREAMING_SNAKE_CASE : Dict = activation_function SCREAMING_SNAKE_CASE : str = init_std SCREAMING_SNAKE_CASE : Tuple = encoder_layerdrop SCREAMING_SNAKE_CASE : Tuple = decoder_layerdrop SCREAMING_SNAKE_CASE : List[str] = use_cache SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layers SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE : Tuple = router_z_loss_coef SCREAMING_SNAKE_CASE : Tuple = router_aux_loss_coef SCREAMING_SNAKE_CASE : List[Any] = decoder_sparse_step SCREAMING_SNAKE_CASE : Any = encoder_sparse_step SCREAMING_SNAKE_CASE : Tuple = num_experts SCREAMING_SNAKE_CASE : Optional[int] = expert_capacity SCREAMING_SNAKE_CASE : int = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" ) SCREAMING_SNAKE_CASE : Optional[int] = router_dtype SCREAMING_SNAKE_CASE 
: Any = router_ignore_padding_tokens SCREAMING_SNAKE_CASE : Any = batch_prioritized_routing SCREAMING_SNAKE_CASE : Optional[Any] = second_expert_policy SCREAMING_SNAKE_CASE : Any = normalize_router_prob_before_dropping SCREAMING_SNAKE_CASE : Tuple = moe_eval_capacity_token_fraction SCREAMING_SNAKE_CASE : int = moe_token_dropout SCREAMING_SNAKE_CASE : Optional[int] = output_router_logits super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
245
1
'''simple docstring'''
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class A__(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
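A minimal round-trip check for the normalizer above (my own sketch, run as if inside the package so the relative imports resolve): `scale` subtracts the learned mean and divides by the std, and `unscale` inverts both steps, so composing the two should reproduce the input.

import torch

norm = A__(embedding_dim=4)  # the class defined above; dim shrunk for the example
x = torch.randn(2, 4)
assert torch.allclose(norm.unscale(norm.scale(x)), x, atol=1e-6)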
358
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') __a = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" if self.train_file is not None: _UpperCAmelCase : List[Any] = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: _UpperCAmelCase : List[str] = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class A__ : """simple docstring""" UpperCamelCase_ : PreTrainedTokenizerBase UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[int] = None def __call__( self : List[Any] , lowerCAmelCase__ : List[str] ) -> List[str]: """simple docstring""" _UpperCAmelCase : int = "label" if "label" in features[0].keys() else "labels" _UpperCAmelCase : Dict = [feature.pop(lowerCAmelCase__ ) for feature in features] _UpperCAmelCase : str = len(lowerCAmelCase__ ) _UpperCAmelCase : int = len(features[0]["input_ids"] ) _UpperCAmelCase : str = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features ] _UpperCAmelCase : List[str] = list(chain(*lowerCAmelCase__ ) ) _UpperCAmelCase : Any = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _UpperCAmelCase : Any = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()} # Add back labels _UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) return batch def __UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", a_, a_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : Optional[int] = training_args.get_process_log_level() logger.setLevel(a_ ) datasets.utils.logging.set_verbosity(a_ ) transformers.utils.logging.set_verbosity(a_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _UpperCAmelCase : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _UpperCAmelCase : Union[str, Any] = {} if data_args.train_file is not None: _UpperCAmelCase : str = data_args.train_file if data_args.validation_file is not None: _UpperCAmelCase : Optional[Any] = data_args.validation_file _UpperCAmelCase : Dict = data_args.train_file.split("." )[-1] _UpperCAmelCase : Optional[int] = load_dataset( a_, data_files=a_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. _UpperCAmelCase : Dict = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )] _UpperCAmelCase : List[Any] = "sent1" _UpperCAmelCase : Optional[int] = "sent2" if data_args.max_seq_length is None: _UpperCAmelCase : List[str] = tokenizer.model_max_length if max_seq_length > 1_024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) _UpperCAmelCase : Dict = 1_024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) _UpperCAmelCase : Dict = min(data_args.max_seq_length, tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = [[context] * 4 for context in examples[context_name]] _UpperCAmelCase : Tuple = examples[question_header_name] _UpperCAmelCase : Optional[Any] = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a_ ) ] # Flatten out _UpperCAmelCase : List[str] = list(chain(*a_ ) ) _UpperCAmelCase : Dict = list(chain(*a_ ) ) # Tokenize _UpperCAmelCase : List[Any] = tokenizer( a_, a_, truncation=a_, max_length=a_, padding="max_length" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(a_ ), 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _UpperCAmelCase : int = raw_datasets["train"] if data_args.max_train_samples is not None: _UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples ) _UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _UpperCAmelCase : Union[str, Any] = train_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _UpperCAmelCase : Dict = raw_datasets["validation"] if data_args.max_eval_samples is not None: _UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples ) _UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _UpperCAmelCase : Optional[int] = eval_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator _UpperCAmelCase : Tuple = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=a_, pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(a_: Tuple ): _UpperCAmelCase , _UpperCAmelCase : Tuple = eval_predictions _UpperCAmelCase : Union[str, Any] = np.argmax(a_, axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _UpperCAmelCase : Any = Trainer( model=a_, args=a_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=a_, data_collator=a_, compute_metrics=a_, ) # Training if training_args.do_train: _UpperCAmelCase : Optional[Any] = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : List[str] = last_checkpoint _UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=a_ ) trainer.save_model() # Saves the tokenizer too for easy upload _UpperCAmelCase : str = train_result.metrics _UpperCAmelCase : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ ) ) _UpperCAmelCase : Union[str, Any] = min(a_, len(a_ ) ) trainer.log_metrics("train", a_ ) trainer.save_metrics("train", a_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _UpperCAmelCase : List[Any] = trainer.evaluate() _UpperCAmelCase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ ) 
_UpperCAmelCase : Tuple = min(a_, len(a_ ) ) trainer.log_metrics("eval", a_ ) trainer.save_metrics("eval", a_ ) _UpperCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**a_ ) else: trainer.create_model_card(**a_ ) def __UpperCAmelCase ( a_: int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
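Since this row mirrors the Transformers `run_swag` example, here is a hedged sketch of how its argument parser would be driven in-process: the flags come from the dataclasses defined above, while the flag values and script name are my own.

import sys

# Hypothetical in-process invocation; `main` is the entry point defined above.
sys.argv = [
    "run_swag.py",  # assumed script name
    "--model_name_or_path", "bert-base-uncased",
    "--output_dir", "/tmp/swag_output",
    "--do_train",
    "--do_eval",
]
main()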
17
0
"""simple docstring""" import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger UpperCamelCase_ = get_logger(__name__) UpperCamelCase_ = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class snake_case : @add_start_docstrings(__UpperCAmelCase) def __call__( self , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''') class snake_case : @add_start_docstrings(__UpperCAmelCase) def __call__( self , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''') class snake_case ( SCREAMING_SNAKE_CASE_ ): @add_start_docstrings(__UpperCAmelCase) def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->jnp.ndarray: for processor in self: a_ = inspect.signature(processor.__call__).parameters if len(__UpperCAmelCase) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( F'''Make sure that all the required parameters: {list(function_args.keys())} for ''' F'''{processor.__class__} are passed to the logits processor.''') a_ = processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) else: a_ = processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase) ->Any: if not isinstance(__UpperCAmelCase , __UpperCAmelCase) or not (temperature > 0): raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''') a_ = temperature def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: a_ = scores / self.temperature return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase = -float("Inf") , __UpperCAmelCase = 1) ->Union[str, Any]: if not isinstance(__UpperCAmelCase , __UpperCAmelCase) or (top_p < 0 or top_p > 1.0): raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''') if not isinstance(__UpperCAmelCase , __UpperCAmelCase) or (min_tokens_to_keep < 1): raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''') a_ = top_p a_ = filter_value a_ = min_tokens_to_keep def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: a_ , a_ = lax.top_k(__UpperCAmelCase , scores.shape[-1]) a_ = jnp.full_like(__UpperCAmelCase , self.filter_value) a_ = jax.nn.softmax(__UpperCAmelCase , axis=-1).cumsum(axis=-1) a_ = cumulative_probs < self.top_p # include the 
token that is higher than top_p as well a_ = jnp.roll(__UpperCAmelCase , 1) score_mask |= score_mask.at[:, 0].set(__UpperCAmelCase) # min tokens to keep a_ = score_mask.at[:, : self.min_tokens_to_keep].set(__UpperCAmelCase) a_ = jnp.where(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a_ = jax.lax.sort_key_val(__UpperCAmelCase , __UpperCAmelCase)[-1] return next_scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase = -float("Inf") , __UpperCAmelCase = 1) ->Optional[Any]: if not isinstance(__UpperCAmelCase , __UpperCAmelCase) or top_k <= 0: raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''') a_ = max(__UpperCAmelCase , __UpperCAmelCase) a_ = filter_value def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: a_ , a_ = scores.shape a_ = jnp.full(batch_size * vocab_size , self.filter_value) a_ = min(self.top_k , scores.shape[-1]) # Safety check a_ , a_ = lax.top_k(__UpperCAmelCase , __UpperCAmelCase) a_ = jnp.broadcast_to((jnp.arange(__UpperCAmelCase) * vocab_size)[:, None] , (batch_size, topk)).flatten() a_ = topk_scores.flatten() a_ = topk_indices.flatten() + shift a_ = next_scores_flat.at[topk_indices_flat].set(__UpperCAmelCase) a_ = next_scores_flat.reshape(__UpperCAmelCase , __UpperCAmelCase) return next_scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase) ->Optional[int]: a_ = bos_token_id def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: a_ = jnp.full(scores.shape , -float("inf")) a_ = 1 - jnp.bool_(cur_len - 1) a_ = jnp.where(__UpperCAmelCase , new_scores.at[:, self.bos_token_id].set(0) , __UpperCAmelCase) return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: a_ = max_length a_ = eos_token_id def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: a_ = jnp.full(scores.shape , -float("inf")) a_ = 1 - jnp.bool_(cur_len - self.max_length + 1) a_ = jnp.where(__UpperCAmelCase , new_scores.at[:, self.eos_token_id].set(0) , __UpperCAmelCase) return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->Tuple: if not isinstance(__UpperCAmelCase , __UpperCAmelCase) or min_length < 0: raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''') if not isinstance(__UpperCAmelCase , __UpperCAmelCase) or eos_token_id < 0: raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''') a_ = min_length a_ = eos_token_id def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: # create boolean flag to decide if min length penalty should be applied a_ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1) a_ = jnp.where(__UpperCAmelCase , scores.at[:, self.eos_token_id].set(-float("inf")) , __UpperCAmelCase) return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]: a_ = list(__UpperCAmelCase) a_ = begin_index def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]: a_ = 1 - jnp.bool_(cur_len - self.begin_index) a_ = jnp.where(__UpperCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf")) , __UpperCAmelCase) return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase) ->int: a_ = 
list(__UpperCAmelCase) def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: a_ = scores.at[..., self.suppress_tokens].set(-float("inf")) return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase) ->Any: a_ = dict(__UpperCAmelCase) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. a_ = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1 for index, token in force_token_map.items(): if token is not None: a_ = force_token_array.at[index].set(__UpperCAmelCase) a_ = jnp.intaa(__UpperCAmelCase) def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->jnp.ndarray: def _force_token(__UpperCAmelCase): a_ = scores.shape[0] a_ = self.force_token_array[generation_idx] a_ = jnp.ones_like(__UpperCAmelCase , dtype=scores.dtype) * -float("inf") a_ = jnp.zeros((batch_size, 1) , dtype=scores.dtype) a_ = lax.dynamic_update_slice(__UpperCAmelCase , __UpperCAmelCase , (0, current_token)) return new_scores a_ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__UpperCAmelCase) , lambda: scores , ) , ) return scores class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: a_ = generate_config.eos_token_id a_ = generate_config.no_timestamps_token_id a_ = generate_config.no_timestamps_token_id + 1 a_ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__UpperCAmelCase , "max_initial_timestamp_index"): a_ = generate_config.max_initial_timestamp_index else: a_ = model_config.vocab_size if self.max_initial_timestamp_index is None: a_ = model_config.vocab_size def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict: # suppress <|notimestamps|> which is handled by without_timestamps a_ = scores.at[:, self.no_timestamps_token_id].set(-float("inf")) def handle_pairs(__UpperCAmelCase , __UpperCAmelCase): a_ = jnp.where((cur_len - self.begin_index) >= 1 , __UpperCAmelCase , __UpperCAmelCase) a_ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __UpperCAmelCase , ) a_ = jnp.where((cur_len - self.begin_index) < 2 , __UpperCAmelCase , __UpperCAmelCase) a_ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __UpperCAmelCase , __UpperCAmelCase , ) return jnp.where( __UpperCAmelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf")) , scores_k.at[: self.eos_token_id].set(-float("inf")) , ) , __UpperCAmelCase , ) a_ = jax.vmap(__UpperCAmelCase)(__UpperCAmelCase , __UpperCAmelCase) a_ = jnp.where(cur_len == self.begin_index , __UpperCAmelCase , __UpperCAmelCase) a_ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __UpperCAmelCase , ) a_ = self.timestamp_begin + self.max_initial_timestamp_index a_ = jnp.where( __UpperCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf")) , __UpperCAmelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp a_ = jax.nn.log_softmax(__UpperCAmelCase , axis=-1) def handle_cumulative_probs(__UpperCAmelCase 
, __UpperCAmelCase): a_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1) a_ = jnp.max(logprobs_k[: self.timestamp_begin]) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf")) , __UpperCAmelCase , ) a_ = jax.vmap(__UpperCAmelCase)(__UpperCAmelCase , __UpperCAmelCase) return scores
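All of the processors in this row share one calling convention: a processor maps `(input_ids, scores, cur_len)` to new `scores`, and the list class simply applies them in order. A minimal sketch of that contract follows; the names are mine, since the row's own class names were mangled to a single identifier.

import jax.numpy as jnp


def temperature_warp(input_ids, scores, cur_len, temperature=0.7):
    # same rescaling as the temperature processor above
    return scores / temperature


def apply_processors(processors, input_ids, scores, cur_len):
    # mirrors the chaining behavior of the processor-list class above
    for processor in processors:
        scores = processor(input_ids, scores, cur_len)
    return scores


scores = apply_processors([temperature_warp], None, jnp.zeros((1, 8)), cur_len=0)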
243
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase_ = 'src/diffusers' UpperCamelCase_ = '.' # This is to make sure the diffusers module imported is the one in the repo. UpperCamelCase_ = importlib.util.spec_from_file_location( 'diffusers', os.path.join(DIFFUSERS_PATH, '__init__.py'), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCamelCase_ = spec.loader.load_module() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , UpperCAmelCase ) is not None def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" a_ = object_name.split("." ) a_ = 0 # First let's find the module where our object lives. a_ = parts[i] while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , F'''{module}.py''' ) ): i += 1 if i < len(UpperCAmelCase ): a_ = os.path.join(UpperCAmelCase , parts[i] ) if i >= len(UpperCAmelCase ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(UpperCAmelCase , F'''{module}.py''' ) , "r" , encoding="utf-8" , newline="\n" ) as f: a_ = f.readlines() # Now let's find the class / func in the code! a_ = "" a_ = 0 for name in parts[i + 1 :]: while ( line_index < len(UpperCAmelCase ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(UpperCAmelCase ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). a_ = line_index while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 a_ = lines[start_index:line_index] return "".join(UpperCAmelCase ) UpperCamelCase_ = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)') UpperCamelCase_ = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)') UpperCamelCase_ = re.compile(R'<FILL\s+[^>]*>') def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" a_ = code.split("\n" ) a_ = 0 while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(UpperCAmelCase ): return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0] return "" def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" a_ = len(get_indent(UpperCAmelCase ) ) > 0 if has_indent: a_ = F'''class Bla:\n{code}''' a_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase ) a_ = black.format_str(UpperCAmelCase , mode=UpperCAmelCase ) a_ , a_ = style_docstrings_in_code(UpperCAmelCase ) return result[len("class Bla:\n" ) :] if has_indent else result def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->str: """simple docstring""" with open(UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f: a_ = f.readlines() a_ = [] a_ = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). 
while line_index < len(UpperCAmelCase ): a_ = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. a_ , a_ , a_ = search.groups() a_ = find_code_in_diffusers(UpperCAmelCase ) a_ = get_indent(UpperCAmelCase ) a_ = line_index + 1 if indent == theoretical_indent else line_index + 2 a_ = theoretical_indent a_ = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. a_ = True while line_index < len(UpperCAmelCase ) and should_continue: line_index += 1 if line_index >= len(UpperCAmelCase ): break a_ = lines[line_index] a_ = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(F'''^{indent}# End copy''' , UpperCAmelCase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 a_ = lines[start_index:line_index] a_ = "".join(UpperCAmelCase ) # Remove any nested `Copied from` comments to avoid circular copies a_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(UpperCAmelCase ) is None] a_ = "\n".join(UpperCAmelCase ) # Before comparing, use the `replace_pattern` on the original code. if len(UpperCAmelCase ) > 0: a_ = replace_pattern.replace("with" , "" ).split("," ) a_ = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns] for pattern in patterns: if pattern is None: continue a_ , a_ , a_ = pattern.groups() a_ = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if option.strip() == "all-casing": a_ = re.sub(obja.lower() , obja.lower() , UpperCAmelCase ) a_ = re.sub(obja.upper() , obja.upper() , UpperCAmelCase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line a_ = blackify(lines[start_index - 1] + theoretical_code ) a_ = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: a_ = lines[:start_index] + [theoretical_code] + lines[line_index:] a_ = start_index + 1 if overwrite and len(UpperCAmelCase ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(UpperCAmelCase ) return diffs def UpperCamelCase ( UpperCAmelCase = False ) ->int: """simple docstring""" a_ = glob.glob(os.path.join(UpperCAmelCase , "**/*.py" ) , recursive=UpperCAmelCase ) a_ = [] for filename in all_files: a_ = is_copy_consistent(UpperCAmelCase , UpperCAmelCase ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(UpperCAmelCase ) > 0: a_ = "\n".join(UpperCAmelCase ) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
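For reference, the `_re_copy_warning` pattern in this row matches markers of the following shape; the class path and replacement below are my own illustrative values, not taken from the source.

# Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
class MyBlock:
    ...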
243
1
"""simple docstring""" from __future__ import annotations import queue class _SCREAMING_SNAKE_CASE: def __init__( self ,SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = data __SCREAMING_SNAKE_CASE :List[str] = None __SCREAMING_SNAKE_CASE :Any = None def __lowerCamelCase ( ) -> Optional[int]: print('''\n********Press N to stop entering at any point of time********\n''' ) __SCREAMING_SNAKE_CASE :Any = input('''Enter the value of the root node: ''' ).strip().lower() __SCREAMING_SNAKE_CASE :queue.Queue = queue.Queue() __SCREAMING_SNAKE_CASE :Tuple = TreeNode(int(_a ) ) q.put(_a ) while not q.empty(): __SCREAMING_SNAKE_CASE :Optional[int] = q.get() __SCREAMING_SNAKE_CASE :str = f'''Enter the left node of {node_found.data}: ''' __SCREAMING_SNAKE_CASE :Tuple = input(_a ).strip().lower() or """n""" if check == "n": return tree_node __SCREAMING_SNAKE_CASE :str = TreeNode(int(_a ) ) __SCREAMING_SNAKE_CASE :List[str] = left_node q.put(_a ) __SCREAMING_SNAKE_CASE :Union[str, Any] = f'''Enter the right node of {node_found.data}: ''' __SCREAMING_SNAKE_CASE :Optional[int] = input(_a ).strip().lower() or """n""" if check == "n": return tree_node __SCREAMING_SNAKE_CASE :Optional[Any] = TreeNode(int(_a ) ) __SCREAMING_SNAKE_CASE :Dict = right_node q.put(_a ) raise def __lowerCamelCase ( a_ : TreeNode ) -> Dict: if not isinstance(_a , _a ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def __lowerCamelCase ( a_ : TreeNode ) -> Union[str, Any]: if not isinstance(_a , _a ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def __lowerCamelCase ( a_ : TreeNode ) -> int: if not isinstance(_a , _a ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def __lowerCamelCase ( a_ : TreeNode ) -> Optional[Any]: if not isinstance(_a , _a ) or not node: return __SCREAMING_SNAKE_CASE :queue.Queue = queue.Queue() q.put(_a ) while not q.empty(): __SCREAMING_SNAKE_CASE :List[str] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __lowerCamelCase ( a_ : TreeNode ) -> Dict: if not isinstance(_a , _a ) or not node: return __SCREAMING_SNAKE_CASE :queue.Queue = queue.Queue() q.put(_a ) while not q.empty(): __SCREAMING_SNAKE_CASE :Dict = [] while not q.empty(): __SCREAMING_SNAKE_CASE :int = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(_a ) def __lowerCamelCase ( a_ : TreeNode ) -> List[str]: if not isinstance(_a , _a ) or not node: return __SCREAMING_SNAKE_CASE :list[TreeNode] = [] __SCREAMING_SNAKE_CASE :Union[str, Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(_a ) __SCREAMING_SNAKE_CASE :str = n.left # end of while means current node doesn't have left child __SCREAMING_SNAKE_CASE :Tuple = stack.pop() # start to traverse its right child __SCREAMING_SNAKE_CASE :Any = n.right def __lowerCamelCase ( a_ : TreeNode ) -> Union[str, Any]: if not isinstance(_a , _a ) or not node: return __SCREAMING_SNAKE_CASE :list[TreeNode] = [] __SCREAMING_SNAKE_CASE :Optional[Any] = node while n or stack: while n: stack.append(_a ) __SCREAMING_SNAKE_CASE :Optional[Any] = n.left __SCREAMING_SNAKE_CASE :Dict = stack.pop() print(n.data 
, end=''',''' ) __SCREAMING_SNAKE_CASE :Optional[Any] = n.right def __lowerCamelCase ( a_ : TreeNode ) -> str: if not isinstance(_a , _a ) or not node: return __SCREAMING_SNAKE_CASE :str = [], [] __SCREAMING_SNAKE_CASE :List[str] = node stacka.append(_a ) while stacka: # to find the reversed order of post order, store it in stack2 __SCREAMING_SNAKE_CASE :Any = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(_a ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def __lowerCamelCase ( a_ : str = "" , a_ : str=50 , a_ : str="*" ) -> List[Any]: if not s: return "\n" + width * char __SCREAMING_SNAKE_CASE :str = divmod(width - len(_a ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) lowerCamelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
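Taking the traversal and class names from the row's `__main__` block and annotations at face value (the corresponding `def` and `class` names were mangled in this dump), here is a tiny hand-built tree and the pre-order output one would expect.

# pre-order visits root, then left subtree, then right subtree
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
pre_order(root)  # expected to print: 1,2,3,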
365
"""simple docstring""" import colorsys from PIL import Image # type: ignore def __lowerCamelCase ( a_ : float , a_ : float , a_ : int ) -> float: __SCREAMING_SNAKE_CASE :List[Any] = x __SCREAMING_SNAKE_CASE :List[Any] = y for step in range(a_ ): # noqa: B007 __SCREAMING_SNAKE_CASE :Dict = a * a - b * b + x __SCREAMING_SNAKE_CASE :Tuple = 2 * a * b + y __SCREAMING_SNAKE_CASE :Dict = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def __lowerCamelCase ( a_ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return (2_55, 2_55, 2_55) def __lowerCamelCase ( a_ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(a_ , 1 , 1 ) ) def __lowerCamelCase ( a_ : int = 8_00 , a_ : int = 6_00 , a_ : float = -0.6 , a_ : float = 0 , a_ : float = 3.2 , a_ : int = 50 , a_ : bool = True , ) -> Image.Image: __SCREAMING_SNAKE_CASE :Optional[int] = Image.new('''RGB''' , (image_width, image_height) ) __SCREAMING_SNAKE_CASE :Tuple = img.load() # loop through the image-coordinates for image_x in range(a_ ): for image_y in range(a_ ): # determine the figure-coordinates based on the image-coordinates __SCREAMING_SNAKE_CASE :Dict = figure_width / image_width * image_height __SCREAMING_SNAKE_CASE :str = figure_center_x + (image_x / image_width - 0.5) * figure_width __SCREAMING_SNAKE_CASE :Tuple = figure_center_y + (image_y / image_height - 0.5) * figure_height __SCREAMING_SNAKE_CASE :List[Any] = get_distance(a_ , a_ , a_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: __SCREAMING_SNAKE_CASE :Optional[int] = get_color_coded_rgb(a_ ) else: __SCREAMING_SNAKE_CASE :Optional[Any] = get_black_and_white_rgb(a_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure lowerCamelCase_ = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
239
0
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( snake_case_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase = AlbertTokenizer lowerCAmelCase = AlbertTokenizerFast lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = True def _UpperCamelCase ( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = AlbertTokenizer(a ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase ( self , a ) -> str: snake_case_ = 'this is a test' snake_case_ = 'this is a test' return input_text, output_text def _UpperCamelCase ( self ) -> int: snake_case_ = '<pad>' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def _UpperCamelCase ( self ) -> Optional[Any]: snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '▁eloquent' ) self.assertEqual(len(a ) , 3_00_00 ) def _UpperCamelCase ( self ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def _UpperCamelCase ( self ) -> Tuple: if not self.test_rust_tokenizer: return snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = tokenizer.tokenize(a ) snake_case_ = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) snake_case_ = tokenizer.encode(a , add_special_tokens=a ) snake_case_ = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(a ) snake_case_ = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def _UpperCamelCase ( self ) -> List[str]: snake_case_ = AlbertTokenizer(a , keep_accents=a ) snake_case_ = tokenizer.tokenize('This is a test' ) self.assertListEqual(a , ['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [48, 25, 21, 12_89] ) snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) snake_case_ = tokenizer.convert_tokens_to_ids(a ) self.assertListEqual(a , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] ) snake_case_ = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual( a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def _UpperCamelCase ( self ) -> Dict: snake_case_ = AlbertTokenizer(a ) snake_case_ = tokenizer.encode('sequence builders' ) snake_case_ = tokenizer.encode('multi-sequence build' ) snake_case_ = tokenizer.build_inputs_with_special_tokens(a ) snake_case_ = tokenizer.build_inputs_with_special_tokens(a , a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _UpperCamelCase ( self ) -> int: # fmt: off snake_case_ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
178
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class UpperCamelCase_(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
178
1
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : Union[str, Any] = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''xlnet''' lowerCAmelCase = ['''mems'''] lowerCAmelCase = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , _UpperCAmelCase=3_2000 , _UpperCAmelCase=1024 , _UpperCAmelCase=24 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=True , _UpperCAmelCase="bi" , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=-1 , _UpperCAmelCase=False , _UpperCAmelCase="last" , _UpperCAmelCase=True , _UpperCAmelCase="tanh" , _UpperCAmelCase=0.1 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , **_UpperCAmelCase , ): '''simple docstring''' __A : List[Any] = vocab_size __A : Optional[Any] = d_model __A : Dict = n_layer __A : Union[str, Any] = n_head if d_model % n_head != 0: raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0') if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})') __A : Tuple = d_model // n_head __A : Optional[int] = ff_activation __A : Union[str, Any] = d_inner __A : List[Any] = untie_r __A : List[str] = attn_type __A : int = initializer_range __A : List[Any] = layer_norm_eps __A : List[Any] = dropout __A : int = mem_len __A : Optional[int] = reuse_len __A : Union[str, Any] = bi_data __A : Any = clamp_len __A : str = same_length __A : List[str] = summary_type __A : Optional[int] = summary_use_proj __A : Union[str, Any] = summary_activation __A : str = summary_last_dropout __A : Tuple = start_n_top __A : Tuple = end_n_top __A : Optional[Any] = bos_token_id __A : Tuple = pad_token_id __A : List[Any] = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) __A : Optional[Any] = kwargs['use_cache'] __A : int = use_mems_eval __A : Any = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase) @property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.') return -1 @max_position_embeddings.setter def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' raise NotImplementedError( F'The model {self.model_type} is one of the few models that has no sequence length limit.')
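The `d_model % n_head` check above implies each attention head gets an equal slice of the hidden size. A hedged sketch of that arithmetic, assuming the standard `transformers.XLNetConfig` that this row's class mirrors:

from transformers import XLNetConfig

config = XLNetConfig()  # defaults: d_model=1024, n_head=16
assert config.d_model % config.n_head == 0
assert config.d_head == config.d_model // config.n_head  # 64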
190
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class SCREAMING_SNAKE_CASE (a__ ): def __get__( self , _UpperCAmelCase , _UpperCAmelCase=None): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute') __A : Optional[Any] = '__cached_' + self.fget.__name__ __A : Union[str, Any] = getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if cached is None: __A : int = self.fget(_UpperCAmelCase) setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return cached def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Tuple: __A : Dict = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'invalid truth value {val!r}' ) def _lowerCAmelCase ( __snake_case : int ) -> Tuple: if is_torch_fx_proxy(__snake_case ): return True if is_torch_available(): import torch if isinstance(__snake_case , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(__snake_case , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(__snake_case , (jnp.ndarray, Tracer) ): return True return isinstance(__snake_case , np.ndarray ) def _lowerCAmelCase ( __snake_case : Dict ) -> List[str]: return isinstance(__snake_case , np.ndarray ) def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Any: return _is_numpy(__snake_case ) def _lowerCAmelCase ( __snake_case : int ) -> Union[str, Any]: import torch return isinstance(__snake_case , torch.Tensor ) def _lowerCAmelCase ( __snake_case : str ) -> Optional[int]: return False if not is_torch_available() else _is_torch(__snake_case ) def _lowerCAmelCase ( __snake_case : Any ) -> List[Any]: import torch return isinstance(__snake_case , torch.device ) def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict: return False if not is_torch_available() else _is_torch_device(__snake_case ) def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]: import torch if isinstance(__snake_case , __snake_case ): if hasattr(__snake_case , __snake_case ): __A : str = getattr(__snake_case , __snake_case ) else: return False return isinstance(__snake_case , torch.dtype ) def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Any: return False if not is_torch_available() else _is_torch_dtype(__snake_case ) def _lowerCAmelCase ( __snake_case : List[str] ) -> int: import tensorflow as tf return isinstance(__snake_case , tf.Tensor ) def _lowerCAmelCase ( __snake_case : Dict ) -> List[Any]: return False if not is_tf_available() else _is_tensorflow(__snake_case ) def _lowerCAmelCase ( __snake_case : str ) -> List[str]: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(__snake_case , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(__snake_case ) return type(__snake_case ) == tf.Tensor def _lowerCAmelCase ( __snake_case : Dict ) -> Tuple: return False if not is_tf_available() else _is_tf_symbolic_tensor(__snake_case ) def _lowerCAmelCase ( __snake_case : int ) -> 
Union[str, Any]: import jax.numpy as jnp # noqa: F811 return isinstance(__snake_case , jnp.ndarray ) def _lowerCAmelCase ( __snake_case : int ) -> List[str]: return False if not is_flax_available() else _is_jax(__snake_case ) def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Dict: if isinstance(__snake_case , (dict, UserDict) ): return {k: to_py_obj(__snake_case ) for k, v in obj.items()} elif isinstance(__snake_case , (list, tuple) ): return [to_py_obj(__snake_case ) for o in obj] elif is_tf_tensor(__snake_case ): return obj.numpy().tolist() elif is_torch_tensor(__snake_case ): return obj.detach().cpu().tolist() elif is_jax_tensor(__snake_case ): return np.asarray(__snake_case ).tolist() elif isinstance(__snake_case , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def _lowerCAmelCase ( __snake_case : Tuple ) -> Optional[int]: if isinstance(__snake_case , (dict, UserDict) ): return {k: to_numpy(__snake_case ) for k, v in obj.items()} elif isinstance(__snake_case , (list, tuple) ): return np.array(__snake_case ) elif is_tf_tensor(__snake_case ): return obj.numpy() elif is_torch_tensor(__snake_case ): return obj.detach().cpu().numpy() elif is_jax_tensor(__snake_case ): return np.asarray(__snake_case ) else: return obj class SCREAMING_SNAKE_CASE (a__ ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = fields(self) # Safety and consistency checks if not len(_UpperCAmelCase): raise ValueError(F'{self.__class__.__name__} has no fields.') if not all(field.default is None for field in class_fields[1:]): raise ValueError(F'{self.__class__.__name__} should not have more than one required field.') __A : Tuple = getattr(self , class_fields[0].name) __A : Tuple = all(getattr(self , field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(_UpperCAmelCase): if isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : List[str] = first_field.items() __A : List[Any] = True else: try: __A : List[Any] = iter(_UpperCAmelCase) __A : Optional[Any] = True except TypeError: __A : List[Any] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_UpperCAmelCase): if ( not isinstance(_UpperCAmelCase , (list, tuple)) or not len(_UpperCAmelCase) == 2 or not isinstance(element[0] , _UpperCAmelCase) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute __A : Optional[int] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'Cannot set key/value for {element}.
It needs to be a tuple (key, value).') break setattr(self , element[0] , element[1]) if element[1] is not None: __A : Optional[int] = element[1] elif first_field is not None: __A : Dict = first_field else: for field in class_fields: __A : List[str] = getattr(self , field.name) if v is not None: __A : Union[str, Any] = v def __delitem__( self , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.') def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.') def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.') def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.') def __getitem__( self , _UpperCAmelCase): '''simple docstring''' if isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : List[Any] = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_UpperCAmelCase , _UpperCAmelCase) super().__setattr__(_UpperCAmelCase , _UpperCAmelCase) def __setitem__( self , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' super().__setitem__(_UpperCAmelCase , _UpperCAmelCase) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return tuple(self[k] for k in self.keys()) class SCREAMING_SNAKE_CASE (a__ , a__ ): @classmethod def SCREAMING_SNAKE_CASE ( cls , _UpperCAmelCase): '''simple docstring''' raise ValueError( F'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}') class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''longest''' lowerCAmelCase = '''max_length''' lowerCAmelCase = '''do_not_pad''' class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''pt''' lowerCAmelCase = '''tf''' lowerCAmelCase = '''np''' lowerCAmelCase = '''jax''' class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : Optional[Any] = context_managers __A : Optional[int] = ExitStack() def __enter__( self): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(_UpperCAmelCase) def __exit__( self , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' self.stack.__exit__(*_UpperCAmelCase , **_UpperCAmelCase) def _lowerCAmelCase ( __snake_case : List[str] ) -> int: __A : Any = infer_framework(__snake_case ) if framework == "tf": __A : int = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __A : Any = inspect.signature(model_class.forward ) # PyTorch models else: __A : Union[str, Any] = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def _lowerCAmelCase ( __snake_case : int ) -> List[Any]: __A : Any = model_class.__name__ __A : Optional[int] = infer_framework(__snake_case ) if framework == "tf": __A : List[str] =
inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __A : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: __A : List[Any] = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def _lowerCAmelCase ( __snake_case : MutableMapping , __snake_case : str = "" , __snake_case : str = "." ) -> Union[str, Any]: def _flatten_dict(__snake_case : Tuple , __snake_case : List[Any]="" , __snake_case : Tuple="." ): for k, v in d.items(): __A : List[Any] = str(__snake_case ) + delimiter + str(__snake_case ) if parent_key else k if v and isinstance(__snake_case , __snake_case ): yield from flatten_dict(__snake_case , __snake_case , delimiter=__snake_case ).items() else: yield key, v return dict(_flatten_dict(__snake_case , __snake_case , __snake_case ) ) @contextmanager def _lowerCAmelCase ( __snake_case : Any , __snake_case : bool = False ) -> List[str]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : Optional[Any]=None ) -> int: if is_numpy_array(__snake_case ): return np.transpose(__snake_case , axes=__snake_case ) elif is_torch_tensor(__snake_case ): return array.T if axes is None else array.permute(*__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.transpose(__snake_case , perm=__snake_case ) elif is_jax_tensor(__snake_case ): return jnp.transpose(__snake_case , axes=__snake_case ) else: raise ValueError(f'Type not supported for transpose: {type(__snake_case )}.' ) def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : str ) -> str: if is_numpy_array(__snake_case ): return np.reshape(__snake_case , __snake_case ) elif is_torch_tensor(__snake_case ): return array.reshape(*__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.reshape(__snake_case , __snake_case ) elif is_jax_tensor(__snake_case ): return jnp.reshape(__snake_case , __snake_case ) else: raise ValueError(f'Type not supported for reshape: {type(__snake_case )}.' ) def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : List[str]=None ) -> Any: if is_numpy_array(__snake_case ): return np.squeeze(__snake_case , axis=__snake_case ) elif is_torch_tensor(__snake_case ): return array.squeeze() if axis is None else array.squeeze(dim=__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.squeeze(__snake_case , axis=__snake_case ) elif is_jax_tensor(__snake_case ): return jnp.squeeze(__snake_case , axis=__snake_case ) else: raise ValueError(f'Type not supported for squeeze: {type(__snake_case )}.' ) def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> int: if is_numpy_array(__snake_case ): return np.expand_dims(__snake_case , __snake_case ) elif is_torch_tensor(__snake_case ): return array.unsqueeze(dim=__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.expand_dims(__snake_case , axis=__snake_case ) elif is_jax_tensor(__snake_case ): return jnp.expand_dims(__snake_case , axis=__snake_case ) else: raise ValueError(f'Type not supported for expand_dims: {type(__snake_case )}.' 
) def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Dict: if is_numpy_array(__snake_case ): return np.size(__snake_case ) elif is_torch_tensor(__snake_case ): return array.numel() elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.size(__snake_case ) elif is_jax_tensor(__snake_case ): return array.size else: raise ValueError(f'Type not supported for expand_dims: {type(__snake_case )}.' ) def _lowerCAmelCase ( __snake_case : str , __snake_case : Tuple ) -> Union[str, Any]: for key, value in auto_map.items(): if isinstance(__snake_case , (tuple, list) ): __A : Tuple = [f'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: __A : Dict = f'{repo_id}--{value}' return auto_map def _lowerCAmelCase ( __snake_case : List[str] ) -> int: for base_class in inspect.getmro(__snake_case ): __A : int = base_class.__module__ __A : List[str] = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'Could not infer framework from class {model_class}.' )
190
1
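The nested-dict flattening helper in the utilities above lost its recursive self-reference when names were rewritten (`yield from flatten_dict(...)` no longer resolves). A minimal self-contained sketch of the same logic with the recursion repaired; the assertion at the bottom is illustrative.

from collections.abc import MutableMapping


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = ".") -> dict:
    """Flatten nested mappings, joining keys with the delimiter."""

    def _flatten(d, parent_key, delimiter):
        for k, v in d.items():
            key = f"{parent_key}{delimiter}{k}" if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from _flatten(v, key, delimiter)  # recurse into non-empty mappings
            else:
                yield key, v

    return dict(_flatten(d, parent_key, delimiter))


assert flatten_dict({"a": {"b": 1, "c": {"d": 2}}, "e": 3}) == {"a.b": 1, "a.c.d": 2, "e": 3}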
'''simple docstring''' import argparse import struct import unittest class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase ) -> None: _snake_case = data # Initialize hash values _snake_case = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants _snake_case = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] _snake_case = self.preprocessing(self.data ) self.final_hash() @staticmethod def lowercase (UpperCAmelCase ) -> bytes: _snake_case = B"""\x80""" + (B"""\x00""" * (63 - (len(UpperCAmelCase ) + 8) % 64)) _snake_case = struct.pack(""">Q""" , (len(UpperCAmelCase ) * 8) ) return data + padding + big_endian_integer def lowercase (self ) -> None: # Convert into blocks of 64 bytes _snake_case = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers _snake_case = list(struct.unpack(""">16L""" , UpperCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array _snake_case = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) _snake_case = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) _snake_case = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression _snake_case = self.ror(UpperCAmelCase , 6 ) ^ self.ror(UpperCAmelCase , 11 ) ^ self.ror(UpperCAmelCase , 25 ) _snake_case = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) _snake_case = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 _snake_case = self.ror(UpperCAmelCase , 2 ) ^ self.ror(UpperCAmelCase , 13 ) ^ self.ror(UpperCAmelCase , 22 ) _snake_case = (a & b) ^ (a & c) ^ (b & c) _snake_case = (sa + maj) % 0x1_00_00_00_00 _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) _snake_case = [a, b, c, d, e, f, g, h] # Modify final values _snake_case = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in enumerate(self.hashes ) ] _snake_case = """""".join([hex(UpperCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int: return 
0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations) class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> None: import hashlib _snake_case = bytes("""Test String""" , """utf-8""" ) self.assertEqual(SHAaaa(UpperCAmelCase ).hash , hashlib.shaaaa(UpperCAmelCase ).hexdigest() ) def __SCREAMING_SNAKE_CASE ( ): import doctest doctest.testmod() _snake_case = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) _snake_case = parser.parse_args() _snake_case = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: _snake_case = f.read() else: _snake_case = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" ) print(SHAaaa(_SCREAMING_SNAKE_CASE ).hash ) if __name__ == "__main__": main()
341
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[2, 2, 3, 2] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=["stage2", "stage3", "stage4"] , UpperCAmelCase=3 , UpperCAmelCase=None , ) -> List[Any]: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = num_stages _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = intermediate_size _snake_case = hidden_act _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = out_features _snake_case = num_labels _snake_case = scope _snake_case = num_stages def lowercase (self ) -> List[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase (self ) -> Tuple: return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def lowercase (self ) -> Any: return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: _snake_case = UperNetForSemanticSegmentation(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowercase (self ) -> Tuple: _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ), ( _snake_case ), ( _snake_case ), ) = config_and_inputs _snake_case = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowerCAmelCase_ = {"image-segmentation": 
UperNetForSemanticSegmentation} if is_torch_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> Optional[Any]: _snake_case = UperNetModelTester(self ) _snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 ) def lowercase (self ) -> str: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase (self ) -> Union[str, Any]: return def lowercase (self ) -> Union[str, Any]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def lowercase (self ) -> int: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def lowercase (self ) -> int: pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def lowercase (self ) -> List[str]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def lowercase (self ) -> Union[str, Any]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def lowercase (self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowercase (self ) -> str: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase (self ) -> int: pass def lowercase (self ) -> List[str]: def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() _snake_case = _config_zero_init(UpperCAmelCase ) _snake_case = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case = model_class(config=UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def lowercase (self ) -> Optional[Any]: pass @slow def lowercase (self ) -> Tuple: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = UperNetForSemanticSegmentation.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( ): _snake_case = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case = Image.open(_SCREAMING_SNAKE_CASE ).convert("""RGB""" ) return image @require_torch @require_vision @slow class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> Any: _snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(UpperCAmelCase ) _snake_case = prepare_img() _snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) ) def lowercase (self ) -> Any: _snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(UpperCAmelCase ) _snake_case = prepare_img() _snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
341
1
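A short sketch of the padding rule implemented by the `preprocessing` step of the SHA-256 class above: append a 0x80 marker byte, zero-fill until the length is 56 mod 64, then append the original bit length as a big-endian 64-bit integer. The helper name is illustrative; the `hashlib` call mirrors the round-trip check in the unit test.

import hashlib
import struct


def sha256_pad(data: bytes) -> bytes:
    # 0x80 marker, zeros up to 56 bytes mod 64, then the 64-bit big-endian bit length.
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack(">Q", len(data) * 8)


message = b"Test String"
assert len(sha256_pad(message)) % 64 == 0  # whole 64-byte blocks
print(hashlib.sha256(message).hexdigest())  # reference digest the test compares against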
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : Dict , _A : Optional[int]=7 , _A : Optional[int]=3 , _A : str=18 , _A : Any=30 , _A : Any=400 , _A : Any=True , _A : List[str]=None , _A : int=True , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Optional[Any]=[0.5, 0.5, 0.5] , ) -> List[Any]: __magic_name__ : Dict = size if size is not None else {'height': 18, 'width': 18} __magic_name__ : Tuple = parent __magic_name__ : str = batch_size __magic_name__ : List[Any] = num_channels __magic_name__ : Dict = image_size __magic_name__ : Union[str, Any] = min_resolution __magic_name__ : int = max_resolution __magic_name__ : Optional[Any] = do_resize __magic_name__ : Optional[Any] = size __magic_name__ : Optional[Any] = do_normalize __magic_name__ : List[str] = image_mean __magic_name__ : Union[str, Any] = image_std def __lowerCAmelCase ( self : Dict ) -> int: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = DPTImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self : Any ) -> Optional[Any]: __magic_name__ : int = DPTImageProcessingTester(self ) @property def __lowerCAmelCase ( self : Tuple ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: __magic_name__ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) def __lowerCAmelCase ( self : int ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) __magic_name__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __lowerCAmelCase ( self : Dict ) -> Tuple: # Initialize image_processing __magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __magic_name__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __magic_name__ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: # Initialize image_processing __magic_name__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __magic_name__ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __magic_name__ : List[Any] = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __lowerCAmelCase ( self : Optional[int] ) -> int: # Initialize image_processing __magic_name__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __magic_name__ : Any = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
275
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase :Optional[int] = logging.get_logger(__name__) set_seed(7_7_0) lowerCAmelCase :str = { '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } lowerCAmelCase :Any = { '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } lowerCAmelCase :List[Any] = os.path.dirname(os.path.abspath(__file__)) lowerCAmelCase :List[Any] = os.path.join(os.path.expanduser('''~'''), '''.cache''') lowerCAmelCase :List[str] = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=False ): """simple docstring""" __magic_name__ : str = model_type if use_small: key += "_small" return os.path.join(lowerCAmelCase , REMOTE_MODEL_PATHS[key]['file_name'] ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) hf_hub_download(repo_id=lowerCAmelCase , filename=lowerCAmelCase , local_dir=lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : str="text" ): """simple docstring""" if model_type == "text": __magic_name__ : Tuple = BarkSemanticModel __magic_name__ : Optional[int] = BarkSemanticConfig __magic_name__ : List[Any] = BarkSemanticGenerationConfig elif model_type == "coarse": __magic_name__ : List[str] = BarkCoarseModel __magic_name__ : Dict = BarkCoarseConfig __magic_name__ : Tuple = BarkCoarseGenerationConfig elif model_type == "fine": __magic_name__ : Optional[Any] = BarkFineModel __magic_name__ : Dict = BarkFineConfig __magic_name__ : Tuple = BarkFineGenerationConfig else: raise NotImplementedError() __magic_name__ : int = f'{model_type}_small' if use_small else model_type __magic_name__ : List[str] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(lowerCAmelCase ): logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info['repo_id'] , model_info['file_name'] ) __magic_name__ : Optional[Any] = torch.load(lowerCAmelCase , map_location=lowerCAmelCase ) # this is a hack __magic_name__ : Optional[Any] = checkpoint['model_args'] if "input_vocab_size" not in model_args: __magic_name__ : Dict = model_args['vocab_size'] __magic_name__ : Optional[int] = model_args['vocab_size'] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments __magic_name__ : Optional[Any] = model_args.pop('n_head' ) __magic_name__ : List[str] = model_args.pop('n_embd' ) __magic_name__ : List[Any] = model_args.pop('n_layer' ) __magic_name__ : Optional[Any] = ConfigClass(**checkpoint['model_args'] ) __magic_name__ : Any = ModelClass(config=lowerCAmelCase ) __magic_name__ : List[str] = GenerationConfigClass() __magic_name__ : List[Any] = model_generation_config __magic_name__ : str = checkpoint['model'] # fixup checkpoint __magic_name__ : str = '_orig_mod.' for k, v in list(state_dict.items() ): if k.startswith(lowerCAmelCase ): # replace part of the key with corresponding layer name in HF implementation __magic_name__ : Tuple = k[len(lowerCAmelCase ) :] for old_layer_name in new_layer_name_dict: __magic_name__ : int = new_k.replace(lowerCAmelCase , new_layer_name_dict[old_layer_name] ) __magic_name__ : Union[str, Any] = state_dict.pop(lowerCAmelCase ) __magic_name__ : Optional[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() ) __magic_name__ : Any = {k for k in extra_keys if not k.endswith('.attn.bias' )} __magic_name__ : Any = set(model.state_dict().keys() ) - set(state_dict.keys() ) __magic_name__ : Dict = {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(lowerCAmelCase ) != 0: raise ValueError(f'extra keys found: {extra_keys}' ) if len(lowerCAmelCase ) != 0: raise ValueError(f'missing keys: {missing_keys}' ) model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase ) __magic_name__ : Union[str, Any] = model.num_parameters(exclude_embeddings=lowerCAmelCase ) __magic_name__ : Optional[Any] = checkpoint['best_val_loss'].item() logger.info(f'model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowerCAmelCase , 3 )} loss' ) model.eval() model.to(lowerCAmelCase ) del checkpoint, state_dict return model def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Tuple="text" ): """simple docstring""" if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() __magic_name__ : List[str] = 'cpu' # do conversion on cpu __magic_name__ : int = _get_ckpt_path(lowerCAmelCase , use_small=lowerCAmelCase ) __magic_name__ : Any = _load_model(lowerCAmelCase , lowerCAmelCase , model_type=lowerCAmelCase , use_small=lowerCAmelCase ) # load bark initial model __magic_name__ : List[str] = _bark_load_model(lowerCAmelCase , 'cpu' , model_type=lowerCAmelCase , use_small=lowerCAmelCase ) if model_type == "text": __magic_name__ : int = bark_model['model'] if model.num_parameters(exclude_embeddings=lowerCAmelCase ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model __magic_name__ : Union[str, Any] = 5 __magic_name__ : Optional[int] = 10 if model_type in ["text", "coarse"]: __magic_name__ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) __magic_name__ : List[str] = bark_model(lowerCAmelCase )[0] __magic_name__ : Optional[int] = model(lowerCAmelCase ) # take last logits 
__magic_name__ : int = output_new_model_total.logits[:, [-1], :] else: __magic_name__ : Tuple = 3 __magic_name__ : List[str] = 8 __magic_name__ : List[str] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) __magic_name__ : str = model(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Tuple = bark_model(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Tuple = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('initial and new outputs are not equal' ) Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) model.save_pretrained(lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : str , ): """simple docstring""" __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Dict = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) ) __magic_name__ : str = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) ) __magic_name__ : int = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) ) __magic_name__ : List[Any] = EncodecConfig.from_pretrained('facebook/encodec_24khz' ) __magic_name__ : Optional[int] = BarkSemanticModel.from_pretrained(lowerCAmelCase ) __magic_name__ : Dict = BarkCoarseModel.from_pretrained(lowerCAmelCase ) __magic_name__ : List[str] = BarkFineModel.from_pretrained(lowerCAmelCase ) __magic_name__ : Optional[Any] = EncodecModel.from_pretrained('facebook/encodec_24khz' ) __magic_name__ : Dict = BarkConfig.from_sub_model_configs( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __magic_name__ : List[Any] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) __magic_name__ : int = BarkModel(lowerCAmelCase ) __magic_name__ : List[str] = semantic __magic_name__ : Optional[int] = coarseAcoustic __magic_name__ : List[str] = fineAcoustic __magic_name__ : int = codec __magic_name__ : Union[str, Any] = bark_generation_config Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) bark.save_pretrained(lowerCAmelCase , repo_id=lowerCAmelCase , push_to_hub=lowerCAmelCase ) if __name__ == "__main__": lowerCAmelCase :Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') lowerCAmelCase :Union[str, Any] = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
275
1
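The Bark conversion script above remaps checkpoint keys by stripping an `_orig_mod.` prefix and substituting layer-name fragments via `new_layer_name_dict`. A minimal sketch of that remapping pattern; the mapping entries come from the script, but the state-dict keys and values here are made up for illustration.

# Fragment-level renames taken from new_layer_name_dict above (subset).
rename_map = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
prefix = "_orig_mod."

state_dict = {  # hypothetical checkpoint entries
    "_orig_mod.transformer.h.0.c_attn.weight": "tensor-0",
    "_orig_mod.transformer.h.1.c_attn.bias": "tensor-1",
}

for key in list(state_dict):
    new_key = key[len(prefix):] if key.startswith(prefix) else key
    for old, new in rename_map.items():
        new_key = new_key.replace(old, new)
    state_dict[new_key] = state_dict.pop(key)

print(state_dict)  # {'layers.0.att_proj.weight': 'tensor-0', 'layers.1.att_proj.bias': 'tensor-1'}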
"""simple docstring""" import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput lowercase_ = "scheduler_config.json" class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : str = 1 __UpperCAmelCase : Dict = 2 __UpperCAmelCase : int = 3 __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : Union[str, Any] = 5 @dataclass class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : jnp.ndarray class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : int = SCHEDULER_CONFIG_NAME __UpperCAmelCase : str = ['dtype'] __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Any = True @classmethod def __UpperCAmelCase ( cls , _a = None , _a = None , _a=False , **_a , ): __a , __a = cls.load_config( pretrained_model_name_or_path=_a , subfolder=_a , return_unused_kwargs=_a , **_a , ) __a , __a = cls.from_config(_a , return_unused_kwargs=_a , **_a ) if hasattr(_a , '''create_state''' ) and getattr(_a , '''has_state''' , _a ): __a = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCAmelCase ( self , _a , _a = False , **_a ): self.save_config(save_directory=_a , push_to_hub=_a , **_a ) @property def __UpperCAmelCase ( self ): return self._get_compatibles() @classmethod def __UpperCAmelCase ( cls ): __a = list(set([cls.__name__] + cls._compatibles ) ) __a = importlib.import_module(__name__.split('''.''' )[0] ) __a = [ getattr(_a , _a ) for c in compatible_classes_str if hasattr(_a , _a ) ] return compatible_classes def lowercase ( lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : Tuple[int] ) -> jnp.ndarray: assert len(lowerCAmelCase__ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCAmelCase__ ) - x.ndim) ) , lowerCAmelCase__ ) def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : str=0.9_99 , lowerCAmelCase__ : List[str]=jnp.floataa ) -> jnp.ndarray: def alpha_bar(lowerCAmelCase__ : str ): return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 __a = [] for i in range(lowerCAmelCase__ ): __a = i / num_diffusion_timesteps __a = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowerCAmelCase__ ) / alpha_bar(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) return jnp.array(lowerCAmelCase__ , dtype=lowerCAmelCase__ ) @flax.struct.dataclass class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : jnp.ndarray __UpperCAmelCase : jnp.ndarray __UpperCAmelCase : jnp.ndarray @classmethod def __UpperCAmelCase ( cls , _a ): __a = scheduler.config if config.trained_betas is not None: __a = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": __a = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
__a = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __a = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) __a = 1.0 - betas __a = jnp.cumprod(_a , axis=0 ) return cls( alphas=_a , betas=_a , alphas_cumprod=_a , ) def lowercase ( lowerCAmelCase__ : CommonSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray ) -> Optional[int]: __a = state.alphas_cumprod __a = alphas_cumprod[timesteps] ** 0.5 __a = sqrt_alpha_prod.flatten() __a = broadcast_to_shape_from_left(lowerCAmelCase__ , original_samples.shape ) __a = (1 - alphas_cumprod[timesteps]) ** 0.5 __a = sqrt_one_minus_alpha_prod.flatten() __a = broadcast_to_shape_from_left(lowerCAmelCase__ , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def lowercase ( lowerCAmelCase__ : CommonSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray ) -> Dict: __a , __a = get_sqrt_alpha_prod(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __a = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def lowercase ( lowerCAmelCase__ : CommonSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray ) -> List[Any]: __a , __a = get_sqrt_alpha_prod(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __a = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
45
import math


def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point above sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001
) -> float:
    """Approximate sqrt(a) with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        # Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
281
0
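The `add_noise` helper in the scheduler code above computes the DDPM forward process q(x_t | x_0) = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise. A NumPy sketch of the same computation using the linear beta schedule from that code; the schedule constants and array shapes are illustrative.

import numpy as np

num_train_timesteps = 1000
betas = np.linspace(0.0001, 0.02, num_train_timesteps)  # "linear" beta schedule
alphas_cumprod = np.cumprod(1.0 - betas)  # alpha_bar_t


def add_noise(x0: np.ndarray, noise: np.ndarray, t: int) -> np.ndarray:
    # q(x_t | x_0): interpolate between the clean sample and pure noise
    sqrt_alpha_prod = alphas_cumprod[t] ** 0.5
    sqrt_one_minus_alpha_prod = (1.0 - alphas_cumprod[t]) ** 0.5
    return sqrt_alpha_prod * x0 + sqrt_one_minus_alpha_prod * noise


rng = np.random.default_rng(0)
x0 = rng.standard_normal((4, 4))
noisy = add_noise(x0, rng.standard_normal((4, 4)), t=500)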
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__) @dataclass class UpperCAmelCase ( UpperCamelCase__ ): __lowercase = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self :Tuple , **lowercase_ :List[str] )-> str: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A__ = deprecated_arg[3:] A__ = not kwargs.pop(lowercase_ ) logger.warning( F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or" F" {positive_arg}={kwargs[positive_arg]}" ) A__ = kwargs.pop("tpu_name" , self.tpu_name ) A__ = kwargs.pop("device_idx" , self.device_idx ) A__ = kwargs.pop("eager_mode" , self.eager_mode ) A__ = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**lowercase_ ) __lowercase = field( default=UpperCamelCase__ , metadata={"""help""": """Name of TPU"""} , ) __lowercase = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) __lowercase = field(default=UpperCamelCase__ , metadata={"""help""": """Benchmark models in eager model."""} ) __lowercase = field( default=UpperCamelCase__ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def UpperCAmelCase_ ( self :int )-> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) A__ = None if self.tpu: try: if self.tpu_name: A__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A__ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A__ = None return tpu @cached_property def UpperCAmelCase_ ( self :int )-> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A__ = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) A__ = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU A__ = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}" ) return strategy @property def UpperCAmelCase_ ( self :List[str] )-> bool: requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def UpperCAmelCase_ ( self :Tuple )-> "tf.distribute.Strategy": requires_backends(self , ["tf"] ) return self._setup_strategy @property def UpperCAmelCase_ ( self :Optional[Any] )-> Union[str, Any]: requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def UpperCAmelCase_ ( self :str )-> int: requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCAmelCase_ ( self :int )-> bool: return self.n_gpu > 0
123
'''simple docstring'''

from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial factor of ``num`` using Pollard's rho algorithm, or None."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
123
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ : Dict = 16 A_ : List[Any] = 32 def A ( snake_case__ , snake_case__ = 16 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) SCREAMING_SNAKE_CASE__ = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case__ ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE__ = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE__ = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE__ = 8 else: SCREAMING_SNAKE_CASE__ = None return tokenizer.pad( snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE__ = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) SCREAMING_SNAKE_CASE__ = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders A_ : List[str] = mocked_dataloaders # noqa: F811 def A ( snake_case__ , snake_case__ ): '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1": SCREAMING_SNAKE_CASE__ = 2 # Initialize accelerator SCREAMING_SNAKE_CASE__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE__ = config["""lr"""] SCREAMING_SNAKE_CASE__ = int(config["""num_epochs"""] ) SCREAMING_SNAKE_CASE__ = int(config["""seed"""] ) SCREAMING_SNAKE_CASE__ = int(config["""batch_size"""] ) SCREAMING_SNAKE_CASE__ = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation SCREAMING_SNAKE_CASE__ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: SCREAMING_SNAKE_CASE__ = batch_size // MAX_GPU_BATCH_SIZE SCREAMING_SNAKE_CASE__ = MAX_GPU_BATCH_SIZE set_seed(snake_case__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE__ = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=snake_case__ ) # Instantiate scheduler SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) SCREAMING_SNAKE_CASE__ = model(**snake_case__ ) SCREAMING_SNAKE_CASE__ = outputs.loss SCREAMING_SNAKE_CASE__ = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() SCREAMING_SNAKE_CASE__ = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**snake_case__ ) SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((predictions, batch["""labels"""]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(snake_case__ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples SCREAMING_SNAKE_CASE__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] SCREAMING_SNAKE_CASE__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) SCREAMING_SNAKE_CASE__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , snake_case__ ) def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) SCREAMING_SNAKE_CASE__ = parser.parse_args() SCREAMING_SNAKE_CASE__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
165
"""simple docstring""" from __future__ import annotations import math def A ( snake_case__ ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = str(snake_case__ ) SCREAMING_SNAKE_CASE__ = [n] for i in range(1 , len(snake_case__ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def A ( snake_case__ ): '''simple docstring''' if len(str(snake_case__ ) ) > 3: if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ): return False return True def A ( snake_case__ = 11 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 13 while len(snake_case__ ) != count: if validate(snake_case__ ): SCREAMING_SNAKE_CASE__ = list_truncated_nums(snake_case__ ) if all(is_prime(snake_case__ ) for i in list_nums ): list_truncated_primes.append(snake_case__ ) num += 2 return list_truncated_primes def A ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F'{sum(compute_truncated_primes(11)) = }')
165
1
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def _lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) _lowerCAmelCase =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, unet @slow def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =AudioDiffusionPipeline(vqvae=__UpperCAmelCase , unet=self.dummy_unet , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase , steps=4 ) _lowerCAmelCase =output.audios[0] _lowerCAmelCase =output.images[0] _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase , steps=4 , return_dict=__UpperCAmelCase ) _lowerCAmelCase =output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _lowerCAmelCase =Mel( 
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _lowerCAmelCase =DDIMScheduler() _lowerCAmelCase =self.dummy_vqvae_and_unet _lowerCAmelCase =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) np.random.seed(0 ) _lowerCAmelCase =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(raw_audio=__UpperCAmelCase , generator=__UpperCAmelCase , start_step=5 , steps=10 ) _lowerCAmelCase =output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _lowerCAmelCase =self.dummy_unet_condition _lowerCAmelCase =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=__UpperCAmelCase , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) np.random.seed(0 ) _lowerCAmelCase =torch.rand((1, 1, 10) ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase , encoding=__UpperCAmelCase ) _lowerCAmelCase =output.images[0] _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =torch_device _lowerCAmelCase =DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase ) _lowerCAmelCase =output.audios[0] _lowerCAmelCase =output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
369
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __A : def __init__(self : Dict , __a : Optional[Any] , __a : List[Any]=3 , __a : List[str]=7 , __a : int=True , __a : int=True , __a : Dict=False , __a : List[str]=True , __a : int=99 , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : Optional[Any]=37 , __a : str="gelu" , __a : Tuple=0.1 , __a : Optional[int]=0.1 , __a : str=512 , __a : int=16 , __a : str=2 , __a : Tuple=0.02 , __a : str=3 , __a : Tuple=4 , __a : List[str]=None , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase (self : Union[str, Any] ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__a , ) def _lowercase (self : Optional[int] , __a : Union[str, Any] , __a : int , __a : Any , __a : int , __a : Union[str, Any] , __a : str , __a : int ): UpperCAmelCase_ = FalconModel(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def _lowercase (self : Union[str, Any] , __a : str , __a : Optional[int] , __a : List[Any] , __a : Optional[Any] , __a : str , __a : Any , __a : List[Any] , __a : int , __a : List[Any] , ): UpperCAmelCase_ = True UpperCAmelCase_ = FalconModel(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , ) UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , ) UpperCAmelCase_ = model(__a , attention_mask=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase (self : Optional[Any] , __a : Dict , __a : Any , __a : str , __a : Dict , __a : List[Any] , __a : str , __a : Tuple , __a : int , __a : List[Any] , ): UpperCAmelCase_ = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase (self : Dict , __a : int , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Optional[int] , __a : Any , ): UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() # first forward pass UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , ) UpperCAmelCase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0] UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : str = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) a__ : int = (FalconForCausalLM,) if is_torch_available() else () a__ : Optional[int] = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, 
"""question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) a__ : Any = False a__ : Tuple = False def _lowercase (self : int ): UpperCAmelCase_ = FalconModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : List[str] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ , *UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: UpperCAmelCase_ = alibi self.model_tester.create_and_check_model(__a , *__a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = "single_label_classification" UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase (self : Any ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = FalconForCausalLM(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , use_cache=__a ) UpperCAmelCase_ = input_ids.shape[0] UpperCAmelCase_ = model._convert_to_rw_cache(result.past_key_values ) UpperCAmelCase_ = model._convert_cache_to_standard_format(__a , __a ) for layer in range(len(__a ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = "multi_label_classification" UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase (self : Tuple ): # Falcon can have 
different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__a , "use_cache" ): return UpperCAmelCase_ = model_class(__a ).to(__a ) if "use_cache" not in inputs: UpperCAmelCase_ = True UpperCAmelCase_ = model(**__a ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return UpperCAmelCase_ = ( getattr(__a , "decoder_layers" , __a ) or getattr(__a , "num_decoder_layers" , __a ) or config.num_hidden_layers ) UpperCAmelCase_ = getattr(__a , "num_kv_heads" , config.num_attention_heads ) UpperCAmelCase_ = getattr(__a , "d_model" , config.hidden_size ) UpperCAmelCase_ = embed_dim // num_attention_heads UpperCAmelCase_ = outputs["past_key_values"] self.assertEqual(len(__a ) , __a ) UpperCAmelCase_ , UpperCAmelCase_ = inputs["input_ids"].shape for i in range(__a ): if config.new_decoder_architecture: UpperCAmelCase_ = config.num_attention_heads elif config.multi_query: UpperCAmelCase_ = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class __A ( unittest.TestCase ): @slow def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" ) UpperCAmelCase_ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" ) model.eval() model.to(__a ) UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) UpperCAmelCase_ = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." 
) UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=19 ) UpperCAmelCase_ = tokenizer.batch_decode(__a )[0] self.assertEqual(__a , __a ) @slow def _lowercase (self : Dict ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: UpperCAmelCase_ = AutoTokenizer.from_pretrained(__a ) UpperCAmelCase_ = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(__a ) UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , num_beams=2 , max_new_tokens=4 ) @slow def _lowercase (self : Union[str, Any] ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: UpperCAmelCase_ = AutoTokenizer.from_pretrained(__a ) UpperCAmelCase_ = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(device=__a ) UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) # Test results are the same with and without cache UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
1
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
285
0
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc given its central angle in degrees."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
198
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Return the real power of an AC circuit from its apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power of an AC circuit from its apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
198
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
281
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
302
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
28
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) using the first ``accuracy`` terms of the Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into the range [-2*pi, 2*pi] so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) using the first ``accuracy`` terms of the Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
28
1
'''simple docstring'''

import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator that makes a function return its own running time in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate ``num_examples`` random examples matching the given feature types."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
139
"""simple docstring""" from math import sqrt def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = 0 _snake_case = 0 _snake_case = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"{solution() = }")
288
0
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" lowerCAmelCase__ :Tuple = len(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = len(matrix[0] ) lowerCAmelCase__ :Any = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for row in range(_SCREAMING_SNAKE_CASE ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :List[str] = matrix[col][row] / matrix[row][row] for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows lowerCAmelCase__ :List[str] = True for i in range(row + 1 , _SCREAMING_SNAKE_CASE ): if matrix[i][row] != 0: lowerCAmelCase__ :Tuple = matrix[i], matrix[row] lowerCAmelCase__ :Union[str, Any] = False break if reduce: rank -= 1 for i in range(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[int] = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
355
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') ) def __A (_SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" lowerCAmelCase__ :int = credit_card_number lowerCAmelCase__ :Tuple = 0 lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) - 2 for i in range(_SCREAMING_SNAKE_CASE , -1 , -2 ): # double the value of every second digit lowerCAmelCase__ :Optional[Any] = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 lowerCAmelCase__ :str = cc_number[:i] + str(_SCREAMING_SNAKE_CASE ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def __A (_SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" lowerCAmelCase__ :Optional[int] = F"{credit_card_number} is an invalid credit card number because" if not credit_card_number.isdigit(): print(F"{error_message} it has nonnumerical characters." ) return False if not 13 <= len(_SCREAMING_SNAKE_CASE ) <= 16: print(F"{error_message} of its length." ) return False if not validate_initial_digits(_SCREAMING_SNAKE_CASE ): print(F"{error_message} of its first two digits." ) return False if not luhn_validation(_SCREAMING_SNAKE_CASE ): print(F"{error_message} it fails the Luhn check." ) return False print(F"{credit_card_number} is a valid credit card number." ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("""4111111111111111""") validate_credit_card_number("""32323""")
254
0
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
39
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Tokenization classes."""

import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
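# Minimal usage sketch for the tokenizer above. It assumes the
# "TsinghuaAI/CPM-Generate" checkpoint from PRETRAINED_VOCAB_FILES_MAP is
# reachable and that jieba is installed; treat it as illustrative, not a test.
if __name__ == "__main__":
    tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    ids = tokenizer.encode("今天天气真好")
    # _decode above strips sentencepiece spaces and maps the \u2582/\u2583
    # placeholders used by the CPM vocabulary back to " " and "\n".
    print(tokenizer.decode(ids))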
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects with an index map, so decrease_key can find a
    node in O(1) and restore the heap property in O(log n).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
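# Because remove() always pops the root and re-sifts, draining the heap yields
# nodes in ascending val order, so the structure doubles as a priority queue.
# A small sketch reusing the Node/MinHeap classes defined above:
if __name__ == "__main__":
    jobs = MinHeap([Node("fetch", 1), Node("parse", 3), Node("render", 2)])
    while not jobs.is_empty():
        print(jobs.remove())  # Node(fetch, 1), Node(render, 2), Node(parse, 3)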
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa snake_case_ = logging.getLogger(__name__) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """summarization""" __UpperCamelCase = ["""loss"""] __UpperCamelCase = ROUGE_KEYS __UpperCamelCase = """rouge2""" def __init__( self :Optional[int] , lowercase_ :Optional[int] , **lowercase_ :List[Any] ) -> Dict: if hparams.sortish_sampler and hparams.gpus > 1: UpperCAmelCase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(lowercase_ , num_labels=lowercase_ , mode=self.mode , **lowercase_ ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) UpperCAmelCase = Path(self.output_dir ) / 'metrics.json' UpperCAmelCase = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) UpperCAmelCase = 0 UpperCAmelCase = defaultdict(lowercase_ ) UpperCAmelCase = self.config.model_type UpperCAmelCase = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size UpperCAmelCase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } UpperCAmelCase = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } UpperCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} UpperCAmelCase = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}""" assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}""" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) UpperCAmelCase = get_git_info()['repo_sha'] UpperCAmelCase = hparams.num_workers UpperCAmelCase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase_ ): UpperCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] UpperCAmelCase = self.decoder_start_token_id UpperCAmelCase = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) 
else LegacySeqaSeqDataset ) UpperCAmelCase = False UpperCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: UpperCAmelCase = self.hparams.eval_max_gen_length else: UpperCAmelCase = self.model.config.max_length UpperCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def UpperCAmelCase__ ( self :str , lowercase_ :Dict[str, torch.Tensor] ) -> Dict[str, List[str]]: UpperCAmelCase = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(lowercase_ , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) UpperCAmelCase = True return readable_batch def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[Any] , **lowercase_ :int ) -> Optional[int]: return self.model(lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :List[int] ) -> Optional[int]: UpperCAmelCase = self.tokenizer.batch_decode( lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) return lmap(str.strip , lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :dict ) -> Tuple: UpperCAmelCase = self.tokenizer.pad_token_id UpperCAmelCase , UpperCAmelCase = batch['input_ids'], batch['attention_mask'] UpperCAmelCase = batch['labels'] if isinstance(self.model , lowercase_ ): UpperCAmelCase = self.model._shift_right(lowercase_ ) else: UpperCAmelCase = shift_tokens_right(lowercase_ , lowercase_ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero UpperCAmelCase = decoder_input_ids self.save_readable_batch(lowercase_ ) UpperCAmelCase = self(lowercase_ , attention_mask=lowercase_ , decoder_input_ids=lowercase_ , use_cache=lowercase_ ) UpperCAmelCase = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id UpperCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase_ ) assert lm_logits.shape[-1] == self.vocab_size UpperCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: UpperCAmelCase = nn.functional.log_softmax(lowercase_ , dim=-1 ) UpperCAmelCase , UpperCAmelCase = label_smoothed_nll_loss( lowercase_ , lowercase_ , self.hparams.label_smoothing , ignore_index=lowercase_ ) return (loss,) @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> int: return self.tokenizer.pad_token_id def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Tuple , lowercase_ :List[Any] ) -> Dict: UpperCAmelCase = self._step(lowercase_ ) UpperCAmelCase = dict(zip(self.loss_names , lowercase_ ) ) # tokens per batch UpperCAmelCase = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() UpperCAmelCase = batch['input_ids'].shape[0] UpperCAmelCase = batch['input_ids'].eq(self.pad ).sum() UpperCAmelCase = batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :List[str] ) -> Dict: return self._generative_step(lowercase_ ) def UpperCAmelCase__ ( self :Tuple , lowercase_ :int , lowercase_ :str="val" ) -> Dict: self.step_count += 1 UpperCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} UpperCAmelCase = losses['loss'] UpperCAmelCase = { k: 
np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len'] } UpperCAmelCase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) UpperCAmelCase = torch.tensor(lowercase_ ).type_as(lowercase_ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase_ ) UpperCAmelCase = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()} UpperCAmelCase = self.step_count self.metrics[prefix].append(lowercase_ ) # callback writes this to self.metrics_save_path UpperCAmelCase = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, f"""{prefix}_loss""": loss, f"""{prefix}_{self.val_metric}""": metric_tensor, } def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Tuple , lowercase_ :List[str] ) -> Dict: return calculate_rouge(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :dict ) -> dict: UpperCAmelCase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') UpperCAmelCase = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) UpperCAmelCase = (time.time() - ta) / batch['input_ids'].shape[0] UpperCAmelCase = self.ids_to_clean_text(lowercase_ ) UpperCAmelCase = self.ids_to_clean_text(batch['labels'] ) UpperCAmelCase = self._step(lowercase_ ) UpperCAmelCase = dict(zip(self.loss_names , lowercase_ ) ) UpperCAmelCase = self.calc_generative_metrics(lowercase_ , lowercase_ ) UpperCAmelCase = np.mean(lmap(lowercase_ , lowercase_ ) ) base_metrics.update(gen_time=lowercase_ , gen_len=lowercase_ , preds=lowercase_ , target=lowercase_ , **lowercase_ ) return base_metrics def UpperCAmelCase__ ( self :Dict , lowercase_ :Optional[Any] , lowercase_ :Dict ) -> List[Any]: return self._generative_step(lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] ) -> int: return self.validation_epoch_end(lowercase_ , prefix='test' ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any ) -> SeqaSeqDataset: UpperCAmelCase = self.n_obs[type_path] UpperCAmelCase = self.target_lens[type_path] UpperCAmelCase = self.dataset_class( self.tokenizer , type_path=lowercase_ , n_obs=lowercase_ , max_target_length=lowercase_ , **self.dataset_kwargs , ) return dataset def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str , lowercase_ :int , lowercase_ :bool = False ) -> DataLoader: UpperCAmelCase = self.get_dataset(lowercase_ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": UpperCAmelCase = dataset.make_sortish_sampler(lowercase_ , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase_ , batch_size=lowercase_ , collate_fn=dataset.collate_fn , shuffle=lowercase_ , num_workers=self.num_workers , sampler=lowercase_ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": UpperCAmelCase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase_ , batch_sampler=lowercase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase_ , batch_size=lowercase_ , collate_fn=dataset.collate_fn , shuffle=lowercase_ , num_workers=self.num_workers , sampler=lowercase_ , ) def 
UpperCAmelCase__ ( self :Optional[int] ) -> DataLoader: UpperCAmelCase = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase_ ) return dataloader def UpperCAmelCase__ ( self :Optional[int] ) -> DataLoader: return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def UpperCAmelCase__ ( self :List[Any] ) -> DataLoader: return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def UpperCAmelCase__ ( lowercase_ :List[Any] , lowercase_ :Tuple ) -> List[Any]: BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ ) add_generic_args(lowercase_ , lowercase_ ) parser.add_argument( '--max_source_length' , default=10_24 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=56 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=1_42 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=1_42 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase_ ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase_ ) parser.add_argument('--max_tokens_per_batch' , type=lowercase_ , default=lowercase_ ) parser.add_argument('--logger_name' , type=lowercase_ , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=lowercase_ , default=-1 , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=lowercase_ , default=5_00 , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument('--n_test' , type=lowercase_ , default=-1 , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=lowercase_ , default='summarization' , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument('--label_smoothing' , type=lowercase_ , default=0.0 , required=lowercase_ ) parser.add_argument('--src_lang' , type=lowercase_ , default='' , required=lowercase_ ) parser.add_argument('--tgt_lang' , type=lowercase_ , default='' , required=lowercase_ ) parser.add_argument('--eval_beams' , type=lowercase_ , default=lowercase_ , required=lowercase_ ) parser.add_argument( '--val_metric' , type=lowercase_ , default=lowercase_ , required=lowercase_ , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=lowercase_ , default=lowercase_ , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=lowercase_ , default=1 , required=lowercase_ , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=lowercase_ , default=-1 , required=lowercase_ , help=( '-1 means never early stop. 
early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' ) , ) return parser class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """translation""" __UpperCamelCase = ["""loss"""] __UpperCamelCase = ["""bleu"""] __UpperCamelCase = """bleu""" def __init__( self :List[str] , lowercase_ :Tuple , **lowercase_ :List[Any] ) -> Optional[int]: super().__init__(lowercase_ , **lowercase_ ) UpperCAmelCase = hparams.src_lang UpperCAmelCase = hparams.tgt_lang def UpperCAmelCase__ ( self :List[str] , lowercase_ :Dict , lowercase_ :Optional[Any] ) -> dict: return calculate_bleu(lowercase_ , lowercase_ ) def _lowerCAmelCase ( lowercase_ , lowercase_=None ): Path(args.output_dir ).mkdir(exist_ok=lowercase_ ) check_output_dir(lowercase_ , expected_items=3 ) if model is None: if "summarization" in args.task: UpperCAmelCase = SummarizationModule(lowercase_ ) else: UpperCAmelCase = TranslationModule(lowercase_ ) UpperCAmelCase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('/tmp' ) or str(args.output_dir ).startswith('/var' ) ): UpperCAmelCase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger UpperCAmelCase = os.environ.get('WANDB_PROJECT' , lowercase_ ) UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=lowercase_ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" ) if args.early_stopping_patience >= 0: UpperCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: UpperCAmelCase = False UpperCAmelCase = args.val_metric == 'loss' UpperCAmelCase = generic_train( lowercase_ , lowercase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , lowercase_ ) , early_stopping_callback=lowercase_ , logger=lowercase_ , ) pickle_save(model.hparams , model.output_dir / 'hparams.pkl' ) if not args.do_predict: return model UpperCAmelCase = '' UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=lowercase_ ) ) if checkpoints: UpperCAmelCase = checkpoints[-1] UpperCAmelCase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() snake_case_ = pl.Trainer.add_argparse_args(parser) snake_case_ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) snake_case_ = parser.parse_args() main(args)
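# A representative invocation of the script above. The length/metric flags come
# from add_model_specific_args; --model_name_or_path, --data_dir, --gpus,
# --do_train and --do_predict come from the generic lightning/base-transformer
# argument helpers, so treat the exact flag set as a sketch rather than a
# verified command line:
#
#   python finetune.py \
#       --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#       --data_dir ./cnn_dm \
#       --output_dir ./cnn_dm_finetune \
#       --gpus 1 --do_train --do_predict \
#       --max_source_length 1024 --max_target_length 56 \
#       --n_val 500 --val_metric rouge2 --eval_beams 4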
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
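# The PT <-> Flax round trip the cross tests above exercise can also be done
# directly. A minimal sketch with a tiny random config; treat the exact
# tolerance as indicative rather than guaranteed.
if __name__ == "__main__":
    import jax.numpy as jnp
    import numpy as np
    import torch
    from transformers import GPTJConfig, GPTJModel
    from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJModel

    config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, rotary_dim=4)
    pt_model = GPTJModel(config).eval()
    fx_model = FlaxGPTJModel(config, dtype=jnp.float32)
    fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

    input_ids = torch.arange(8)[None, :]
    with torch.no_grad():
        pt_out = pt_model(input_ids).last_hidden_state.numpy()
    fx_out = fx_model(input_ids.numpy()).last_hidden_state
    print(np.max(np.abs(fx_out - pt_out)))  # expected to be small, on the order of 1e-3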
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
"""
Project Euler problem 21: evaluate the sum of all the amicable numbers
under 10000.
"""

from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """
    Return the sum of all amicable numbers below n.
    >>> solution(10000)
    31626
    """
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
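# The classic amicable pair (220, 284) illustrates the predicate used in
# solution(); these proper-divisor sums are standard number-theory facts:
if __name__ == "__main__":
    assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110
    assert sum_of_divisors(284) == 220  # 1+2+4+71+142
    assert sum_of_divisors(sum_of_divisors(220)) == 220  # hence 220 is amicable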
""" Testing suite for the PyTorch MobileViTV2 model. """

import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
: Dict =MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor SCREAMING_SNAKE_CASE__ : Optional[int] =prepare_img() SCREAMING_SNAKE_CASE__ : List[str] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[str] =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) ) @slow def __magic_name__ ( self : List[Any] ) -> str: SCREAMING_SNAKE_CASE__ : str =MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) SCREAMING_SNAKE_CASE__ : List[Any] =model.to(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) SCREAMING_SNAKE_CASE__ : Tuple =prepare_img() SCREAMING_SNAKE_CASE__ : str =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =outputs.logits # verify the logits SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-4 ) ) @slow def __magic_name__ ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ : Any =MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) SCREAMING_SNAKE_CASE__ : int =model.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) SCREAMING_SNAKE_CASE__ : List[Any] =prepare_img() SCREAMING_SNAKE_CASE__ : List[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[str] =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE__ : List[Any] =image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =image_processor.post_process_semantic_segmentation(outputs=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowercase )
152
def solution(limit: int = 1000000) -> int:
    """Project Euler 14: return the starting number below `limit` that produces
    the longest Collatz chain, memoizing chain lengths along the way."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
205
0
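# The MobileViTV2 tester above sizes its widest hidden dimension with
# make_divisible(512 * width_multiplier, divisor=8). Below is a minimal sketch
# of the MobileNet-style rounding rule that helper is assumed to implement
# (the real one lives in modeling_mobilevitva), plus a sanity check of the
# memoized Collatz solver from the same row:
from typing import Optional


def make_divisible_sketch(value: float, divisor: int = 8, min_value: Optional[int] = None) -> int:
    # Round to the nearest multiple of `divisor`, never dropping below 90% of `value`.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


assert make_divisible_sketch(512 * 0.25, divisor=8) == 128  # the tester's default width_multiplier

assert solution(10) == 9  # 9 has the longest Collatz chain (20 terms) among starting values below 10
# solution(1000000) == 837799, the classic Project Euler 14 answer (takes a few seconds to compute)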
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow A__ : Optional[Any] =logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Optional[Any] , __snake_case : Path , __snake_case : Union[str, None] = None , __snake_case : Union[List[str], None] = None , __snake_case : Union[str, List[str], None] = None , __snake_case : bool = True , ) -> Tuple: _lowerCAmelCase = [file for file in os.listdir(__snake_case ) if os.path.isfile(os.path.join(__snake_case , __snake_case ) )] if identifier is not None: _lowerCAmelCase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__snake_case , __snake_case ): for n_ in n_identifier: _lowerCAmelCase = [file for file in files if n_ not in file] else: _lowerCAmelCase = [file for file in files if n_identifier not in file] _lowerCAmelCase = ignore_files or [] ignore_files.append("""__init__.py""" ) _lowerCAmelCase = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __snake_case ) if only_modules: _lowerCAmelCase = file.split(""".""" )[0] try: _lowerCAmelCase = getattr(__snake_case , __snake_case ) _lowerCAmelCase = doctest.DocTestSuite(__snake_case ) _lowerCAmelCase = unittest.TextTestRunner().run(__snake_case ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f"{module_identifier} is not a module." ) else: _lowerCAmelCase = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowercase__ ( self : Union[str, Any] ) -> List[str]: _lowerCAmelCase = Path("""src/transformers""" ) _lowerCAmelCase = """modeling""" _lowerCAmelCase = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__snake_case , identifier=__snake_case , ignore_files=__snake_case ) def lowercase__ ( self : Optional[int] ) -> str: _lowerCAmelCase = Path("""src/transformers""" ) _lowerCAmelCase = """tokenization""" self.analyze_directory(__snake_case , identifier=__snake_case ) def lowercase__ ( self : str ) -> List[str]: _lowerCAmelCase = Path("""src/transformers""" ) _lowerCAmelCase = """configuration""" self.analyze_directory(__snake_case , identifier=__snake_case ) def lowercase__ ( self : str ) -> str: _lowerCAmelCase = Path("""src/transformers""" ) _lowerCAmelCase = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__snake_case , n_identifier=__snake_case ) def lowercase__ ( self : Tuple ) -> Any: _lowerCAmelCase = Path("""docs/source""" ) _lowerCAmelCase = ["""favicon.ico"""] self.analyze_directory(__snake_case , ignore_files=__snake_case , only_modules=__snake_case )
220
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool A__ : List[str] ={ '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', 
'''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': 
'''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class UpperCAmelCase ( snake_case_ ): _lowercase: Dict = '''facebook/nllb-200-distilled-600M''' _lowercase: int = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) _lowercase: Any = '''translator''' _lowercase: Optional[int] = AutoTokenizer _lowercase: str = AutoModelForSeqaSeqLM _lowercase: List[Any] = LANGUAGE_CODES _lowercase: Tuple = ['''text''', '''text''', '''text'''] _lowercase: List[str] = ['''text'''] def lowercase__ ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ) -> Optional[Any]: if src_lang not in self.lang_to_code: raise ValueError(f"{src_lang} is not a supported language." ) if tgt_lang not in self.lang_to_code: raise ValueError(f"{tgt_lang} is not a supported language." ) _lowerCAmelCase = self.lang_to_code[src_lang] _lowerCAmelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __snake_case , return_tensors="""pt""" , src_lang=__snake_case , tgt_lang=__snake_case ) def lowercase__ ( self : Optional[int] , __snake_case : Any ) -> List[str]: return self.model.generate(**__snake_case ) def lowercase__ ( self : Dict , __snake_case : List[Any] ) -> Any: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__snake_case )
220
1
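# The doc-test analyzer above ultimately reduces to one doctest call per file;
# a standalone sketch of that core step ("path/to/module.py" is a placeholder):
import doctest

result = doctest.testfile("path/to/module.py", module_relative=False, optionflags=doctest.ELLIPSIS)
assert result.failed == 0

# Driving the NLLB translation tool defined above is assumed to look like this
# (the row scrambles the class name; TranslationTool is the upstream
# transformers.tools name, and PipelineTool is expected to load the
# facebook/nllb-200-distilled-600M checkpoint lazily on the first call):
tool = TranslationTool()
print(tool("Bonjour, le monde !", src_lang="French", tgt_lang="English"))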
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'spiece.model'} __UpperCAmelCase = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __UpperCAmelCase = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __UpperCAmelCase = 0 __UpperCAmelCase = 1 __UpperCAmelCase = 2 __UpperCAmelCase = 3 __UpperCAmelCase = 4 class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Union[str, Any] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Union[str, Any] = '''left''' def __init__( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<sep>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<cls>" , _UpperCamelCase="<mask>" , _UpperCamelCase=["<eop>", "<eod>"] , _UpperCamelCase = None , **_UpperCamelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : int = do_lower_case UpperCAmelCase_ : int = remove_space UpperCAmelCase_ : Dict = keep_accents UpperCAmelCase_ : List[str] = vocab_file UpperCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> List[Any]: return len(self.sp_model ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[Any]: UpperCAmelCase_ : Tuple = self.__dict__.copy() UpperCAmelCase_ : Union[str, Any] = None return state def __setstate__( self , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Tuple = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): UpperCAmelCase_ : Union[str, Any] = {} UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict: if self.remove_space: UpperCAmelCase_ : Optional[int] = ' '.join(inputs.strip().split() ) else: UpperCAmelCase_ : Dict = inputs UpperCAmelCase_ : str = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: UpperCAmelCase_ : List[str] = unicodedata.normalize('NFKD' , 
_UpperCamelCase ) UpperCAmelCase_ : List[Any] = ''.join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] ) if self.do_lower_case: UpperCAmelCase_ : List[Any] = outputs.lower() return outputs def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = self.preprocess_text(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) UpperCAmelCase_ : str = [] for piece in pieces: if len(_UpperCamelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): UpperCAmelCase_ : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase_ : str = cur_pieces[1:] else: UpperCAmelCase_ : int = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCamelCase ) else: new_pieces.append(_UpperCamelCase ) return new_pieces def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: return self.sp_model.PieceToId(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple: return self.sp_model.IdToPiece(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]: UpperCAmelCase_ : List[str] = ''.join(_UpperCamelCase ).replace(_UpperCamelCase , ' ' ).strip() return out_string def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = True , **_UpperCamelCase , ) -> str: UpperCAmelCase_ : Optional[int] = kwargs.pop('use_source_tokenizer' , _UpperCamelCase ) UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) UpperCAmelCase_ : List[str] = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens UpperCAmelCase_ : Dict = ''.join(_UpperCamelCase ) UpperCAmelCase_ : Dict = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: UpperCAmelCase_ : int = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : List[Any] = [self.sep_token_id] UpperCAmelCase_ : Tuple = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is not None: return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1, 1] return ([0] * len(_UpperCamelCase )) + [1, 1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Any = [self.sep_token_id] UpperCAmelCase_ : Any = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase_ : List[str] = os.path.join( _UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , 'wb' ) as fi: UpperCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
29
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicated initializers from an ONNX model and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
20
0
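# Minimal use of the SentencePiece XLNet tokenizer above (the public
# xlnet-base-cased checkpoint and network access are assumed, and the row
# scrambles the class name; XLNetTokenizer is the upstream one). Per the row's
# build_inputs_with_special_tokens, encoded sequences end with <sep> then <cls>:
tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
ids = tok("Hello world")["input_ids"]
assert ids[-1] == tok.cls_token_id and ids[-2] == tok.sep_token_id

# The ONNX deduplication script above is invoked on a single exported model;
# "/tmp/exported/model.onnx" is a placeholder path, and the optimized copy is
# written next to it as "optimized_model.onnx":
optimized_path = remove_dup_initializers("/tmp/exported/model.onnx")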
def heaps(arr: list) -> list:
    """Return all permutations of `arr` (as tuples) using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first and last elements
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
359
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class a : """simple docstring""" def __init__( self , lowerCAmelCase_ = "cpu" , lowerCAmelCase_ = "openai/clip-vit-large-patch14" ) -> None: _A = device _A = CLIPTokenizerFast.from_pretrained(lowerCAmelCase_ ) _A = [0.4814_5466, 0.457_8275, 0.4082_1073] _A = [0.2686_2954, 0.2613_0258, 0.2757_7711] _A = torchvision.transforms.Normalize(self.image_mean , self.image_std ) _A = torchvision.transforms.Resize(2_24 ) _A = torchvision.transforms.CenterCrop(2_24 ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = self.resize(lowerCAmelCase_ ) _A = self.center_crop(lowerCAmelCase_ ) _A = self.normalize(lowerCAmelCase_ ) return images def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple: _A = self.tokenizer(text=lowerCAmelCase_ , **lowerCAmelCase_ ) _A = self.preprocess_img(lowerCAmelCase_ ) _A = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class a ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_=10 , lowerCAmelCase_=0.01 , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="image" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> None: super().__init__() _A = None _A = device if device else get_device() if vqgan: _A = vqgan else: _A = load_vqgan(self.device , conf_path=lowerCAmelCase_ , ckpt_path=lowerCAmelCase_ ) self.vqgan.eval() if clip: _A = clip else: _A = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _A = ProcessorGradientFlow(device=self.device ) _A = iterations _A = lr _A = log _A = make_grid _A = return_val _A = quantize _A = self.vqgan.decoder.z_shape def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=5 , lowerCAmelCase_=True ) -> Any: _A = [] if output_path is None: _A = """./animation.gif""" if input_path is None: _A = self.save_path _A = sorted(glob(input_path + """/*""" ) ) if not len(lowerCAmelCase_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(lowerCAmelCase_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _A = total_duration / len(lowerCAmelCase_ ) _A = [frame_duration] * len(lowerCAmelCase_ ) if extend_frames: _A = 1.5 _A = 3 for file_name in paths: if file_name.endswith(""".png""" ): images.append(imageio.imread(lowerCAmelCase_ ) ) imageio.mimsave(lowerCAmelCase_ , lowerCAmelCase_ , duration=lowerCAmelCase_ ) print(F'''gif saved to {output_path}''' ) def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> str: if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _A = preprocess(Image.open(lowerCAmelCase_ ) , target_image_size=2_56 ).to(self.device ) _A = preprocess_vqgan(lowerCAmelCase_ ) _A , *_A = self.vqgan.encode(lowerCAmelCase_ ) return z def UpperCAmelCase ( self , 
lowerCAmelCase_ ) -> List[Any]: _A = self.latent.detach().requires_grad_() _A = base_latent + transform_vector if self.quantize: _A , *_A = self.vqgan.quantize(lowerCAmelCase_ ) else: _A = trans_latent return self.vqgan.decode(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]: _A = self.clip_preprocessor(text=lowerCAmelCase_ , images=lowerCAmelCase_ , return_tensors="""pt""" , padding=lowerCAmelCase_ ) _A = self.clip(**lowerCAmelCase_ ) _A = clip_outputs.logits_per_image if weights is not None: _A = similarity_logits * weights return similarity_logits.sum() def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: _A = self._get_clip_similarity(pos_prompts["""prompts"""] , lowerCAmelCase_ , weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _A = self._get_clip_similarity(neg_prompts["""prompts"""] , lowerCAmelCase_ , weights=neg_prompts["""weights"""] ) else: _A = torch.tensor([1] , device=self.device ) _A = -torch.log(lowerCAmelCase_ ) + torch.log(lowerCAmelCase_ ) return loss def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int: _A = torch.randn_like(self.latent , requires_grad=lowerCAmelCase_ , device=self.device ) _A = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _A = self._add_vector(lowerCAmelCase_ ) _A = loop_post_process(lowerCAmelCase_ ) _A = self._get_CLIP_loss(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) print("""CLIP loss""" , lowerCAmelCase_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=lowerCAmelCase_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: wandb.init(reinit=lowerCAmelCase_ , project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _A = Image.open(lowerCAmelCase_ ) _A = image.resize((2_56, 2_56) ) wandb.log("""Original Image""" , wandb.Image(lowerCAmelCase_ ) ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: if not prompts: return [] _A = [] _A = [] if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(lowerCAmelCase_ , (tuple, list) ): _A = prompt[0] _A = float(prompt[1] ) elif ":" in prompt: _A , _A = prompt.split(""":""" ) _A = float(lowerCAmelCase_ ) else: _A = prompt _A = 1.0 processed_prompts.append(lowerCAmelCase_ ) weights.append(lowerCAmelCase_ ) return { "prompts": processed_prompts, "weights": torch.tensor(lowerCAmelCase_ , device=self.device ), } def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , ) -> str: if image_path: _A = self._get_latent(lowerCAmelCase_ ) else: _A = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) assert pos_prompts, "You must provide at least one positive prompt." 
_A = self.process_prompts(lowerCAmelCase_ ) _A = self.process_prompts(lowerCAmelCase_ ) if save_final and save_path is None: _A = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(lowerCAmelCase_ ): os.makedirs(lowerCAmelCase_ ) else: _A = save_path + """_""" + get_timestamp() os.makedirs(lowerCAmelCase_ ) _A = save_path _A = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(lowerCAmelCase_ ) ) _A = loop_post_process(lowerCAmelCase_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ): if show_intermediate: show_pil(lowerCAmelCase_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) ) if self.log: wandb.log({"""Image""": wandb.Image(lowerCAmelCase_ )} ) if show_final: show_pil(lowerCAmelCase_ ) if save_final: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
81
0
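# Illustrative check of the Heap's-algorithm permutation generator above:
perms = heaps([1, 2, 3])
assert len(perms) == 6 and len(set(perms)) == 6  # 3! distinct permutations
assert (1, 2, 3) in perms

# The VQGAN-CLIP editor above parses weighted prompts of the form
# "text:weight" separated by "|"; a standalone sketch of that parsing step
# (names are illustrative, and each prompt is assumed to hold at most one colon):
raw = "a smiling face:1.0 | wearing glasses:0.5"
parsed = []
for prompt in (p.strip() for p in raw.split("|")):
    text, _, weight = prompt.partition(":")
    parsed.append((text, float(weight) if weight else 1.0))
assert parsed == [("a smiling face", 1.0), ("wearing glasses", 0.5)]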
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCamelCase__ : def __init__(self : Any , snake_case_ : List[Any] , snake_case_ : int=1_3 , snake_case_ : List[str]=1_0 , snake_case_ : List[Any]=3 , snake_case_ : Optional[int]=2 , snake_case_ : Optional[int]=2 , snake_case_ : Any=2 , snake_case_ : str=True , snake_case_ : Tuple=True , snake_case_ : List[Any]=3_2 , snake_case_ : Dict=5 , snake_case_ : Any=4 , snake_case_ : Union[str, Any]=3_7 , snake_case_ : Tuple="gelu" , snake_case_ : Optional[Any]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=1_0 , snake_case_ : Any=0.02 , snake_case_ : str=0.9 , snake_case_ : Optional[int]=None , ): __a : Any = parent __a : Any = batch_size __a : int = image_size __a : List[Any] = num_channels __a : Any = patch_size __a : Optional[int] = tubelet_size __a : Optional[int] = num_frames __a : str = is_training __a : List[str] = use_labels __a : List[Any] = hidden_size __a : Union[str, Any] = num_hidden_layers __a : Optional[int] = num_attention_heads __a : List[Any] = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : int = attention_probs_dropout_prob __a : Union[str, Any] = type_sequence_label_size __a : int = initializer_range __a : str = mask_ratio __a : int = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame __a : List[str] = (image_size // patch_size) ** 2 __a : Tuple = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos __a : Optional[int] = int(mask_ratio * self.seq_length ) def lowerCAmelCase (self : str ): __a : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __a : Tuple = None if self.use_labels: __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : str = self.get_config() return config, pixel_values, labels def lowerCAmelCase (self : int ): return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , ) def lowerCAmelCase (self : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Any ): __a : List[str] = 
VideoMAEModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() __a : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase (self : List[str] , snake_case_ : Dict , snake_case_ : int , snake_case_ : Optional[Any] ): __a : Optional[int] = VideoMAEForPreTraining(snake_case_ ) model.to(snake_case_ ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __a : Tuple = torch.ones((self.num_masks,) ) __a : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) __a : Any = mask.expand(self.batch_size , -1 ).bool() __a : List[Any] = model(snake_case_ , snake_case_ ) # model only returns predictions for masked patches __a : Dict = mask.sum().item() __a : Optional[int] = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def lowerCAmelCase (self : Optional[int] ): __a : Tuple = self.prepare_config_and_inputs() __a , __a , __a : Optional[int] = config_and_inputs __a : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( __lowercase ,__lowercase ,unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Optional[int] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : Any = False _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Any = False def lowerCAmelCase (self : List[Any] ): __a : int = VideoMAEModelTester(self ) __a : Union[str, Any] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 ) def lowerCAmelCase (self : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : int=False ): __a : List[str] = copy.deepcopy(snake_case_ ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __a : Dict = torch.ones((self.model_tester.num_masks,) ) __a : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) __a : List[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool() __a : Any = bool_masked_pos.to(snake_case_ ) if return_labels: if model_class in [ *get_values(snake_case_ ), ]: __a : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case_ ) return inputs_dict def lowerCAmelCase (self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' ) def lowerCAmelCase (self : Optional[int] ): pass def lowerCAmelCase (self : Union[str, Any] ): __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(snake_case_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) ) def lowerCAmelCase (self : str ): __a , __a : Tuple = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Union[str, Any] = model_class(snake_case_ ) __a : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[int] = [*signature.parameters.keys()] __a : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCAmelCase (self : Optional[int] ): __a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCAmelCase (self : Optional[Any] ): __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case_ ) @slow def lowerCAmelCase (self : Any ): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Tuple = VideoMAEModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def lowerCAmelCase (self : List[str] ): if not self.has_attentions: pass else: __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() __a : List[Any] = True for model_class in self.all_model_classes: __a : List[Any] = self.model_tester.seq_length - self.model_tester.num_masks __a : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) __a : List[Any] = True __a : str = False __a : Any = True __a : List[Any] = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): __a : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) __a : Any = outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __a : List[Any] = True __a : Dict = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): __a : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) __a : List[Any] = outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __a : str = len(snake_case_ ) # Check attention is always last and order is fine __a : Tuple = True __a : Any = True __a : Optional[int] = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): __a : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(out_len + 1 , len(snake_case_ ) ) __a : Dict = outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def lowerCAmelCase (self : Tuple ): def check_hidden_states_output(snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Tuple ): __a : int = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): __a : List[str] = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) __a : Tuple = outputs.hidden_states __a : Optional[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(snake_case_ ) , snake_case_ ) __a : int = self.model_tester.seq_length - self.model_tester.num_masks __a : Dict = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, 
self.model_tester.hidden_size] , ) __a , __a : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : int = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Dict = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowerCAmelCase (self : Optional[int] ): pass def __UpperCamelCase ( ): __a : Tuple = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) __a : int = np.load(lowerCAmelCase__ ) return list(lowerCAmelCase__ ) @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): @cached_property def lowerCAmelCase (self : Optional[Any] ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCAmelCase (self : List[Any] ): __a : List[Any] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to( snake_case_ ) __a : Union[str, Any] = self.default_image_processor __a : Any = prepare_video() __a : int = image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) # forward pass with torch.no_grad(): __a : str = model(**snake_case_ ) # verify the logits __a : Dict = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , snake_case_ ) __a : Optional[int] = torch.tensor([0.3669, -0.0688, -0.2421] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) ) @slow def lowerCAmelCase (self : Dict ): __a : str = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(snake_case_ ) __a : Optional[Any] = self.default_image_processor __a : Optional[Any] = prepare_video() __a : Optional[Any] = image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) # add boolean mask, indicating which patches to mask __a : int = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) __a : List[str] = torch.load(snake_case_ ) # forward pass with torch.no_grad(): __a : Union[str, Any] = model(**snake_case_ ) # verify the logits __a : int = torch.Size([1, 1_4_0_8, 1_5_3_6] ) __a : Any = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=snake_case_ ) self.assertEqual(outputs.logits.shape , snake_case_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , snake_case_ , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) __a : Tuple = torch.tensor([0.5142] , device=snake_case_ ) self.assertTrue(torch.allclose(outputs.loss , snake_case_ , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) __a : Union[str, Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=snake_case_ ).to( snake_case_ ) with torch.no_grad(): __a : Union[str, Any] = model(**snake_case_ ) __a : List[Any] = torch.tensor(torch.tensor([0.6469] ) , device=snake_case_ ) self.assertTrue(torch.allclose(outputs.loss , snake_case_ , atol=1E-4 ) )
216
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() lowercase__ =logging.get_logger(__name__) def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ): __a : List[str] = os.path.abspath(lowerCAmelCase__ ) logger.info(f"Converting TensorFlow checkpoint from {tf_path}" ) # Load weights from TF model __a : Tuple = tf.train.list_variables(lowerCAmelCase__ ) __a : Optional[Any] = [] __a : Union[str, Any] = [] __a : str = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") __a : Any = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"Skipping non-model layer {full_name}" ) continue if "optimizer" in full_name: logger.info(f"Skipping optimization layer {full_name}" ) continue if name[0] == "model": # ignore initial 'model' __a : Any = name[1:] # figure out how many levels deep the name is __a : List[Any] = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(lowerCAmelCase__ ) # read data __a : Tuple = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ ) names.append('''/'''.join(lowerCAmelCase__ ) ) arrays.append(lowerCAmelCase__ ) logger.info(f"Read a total of {len(lowerCAmelCase__ ):,} layers" ) # Sanity check if len(set(lowerCAmelCase__ ) ) != 1: raise ValueError(f"Found layer names with different depths (layer depth {list(set(lowerCAmelCase__ ) )})" ) __a : int = list(set(lowerCAmelCase__ ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __a : int = full_name.split('''/''' ) __a : Tuple = model __a : Dict = [] for i, m_name in enumerate(lowerCAmelCase__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): __a : Union[str, Any] = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''embeddings''' ) __a : List[str] = getattr(lowerCAmelCase__ , '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) __a : Dict = getattr(lowerCAmelCase__ , '''encoder''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''layer''' ) __a : Any = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) __a : Any = getattr(lowerCAmelCase__ , '''pooler''' ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) __a : int = getattr(lowerCAmelCase__ , '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) __a : List[str] = getattr(lowerCAmelCase__ , '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''token_type_embeddings''' ) else: raise ValueError(f"Unknown embedding layer with name {full_name}" ) trace.append('''weight''' ) __a : Tuple = getattr(lowerCAmelCase__ , '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''attention''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) __a : int = getattr(lowerCAmelCase__ , '''attention''' ) __a : List[Any] = getattr(lowerCAmelCase__ , '''output''' ) __a : List[Any] = getattr(lowerCAmelCase__ , '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''attention''' ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''output''' ) __a : Any = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) __a : Tuple = getattr(lowerCAmelCase__ , '''output''' ) __a : str = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) __a : int = getattr(lowerCAmelCase__ , '''output''' ) __a : str = getattr(lowerCAmelCase__ , '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''key''' ) elif m_name == "_query_dense": # attention query trace.append('''query''' ) 
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''value''' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''intermediate''' ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) __a : int = getattr(lowerCAmelCase__ , '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) __a : Dict = getattr(lowerCAmelCase__ , '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) __a : List[Any] = getattr(lowerCAmelCase__ , '''weight''' ) else: logger.warning(f"Ignored {m_name}" ) # for certain layers reshape is necessary __a : List[str] = '''.'''.join(lowerCAmelCase__ ) if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , lowerCAmelCase__ ) or re.match( R'''(\S+)\.attention\.output\.dense\.weight''' , lowerCAmelCase__ ): __a : str = array.reshape(pointer.data.shape ) if "kernel" in full_name: __a : Optional[Any] = array.transpose() if pointer.shape == array.shape: __a : str = torch.from_numpy(lowerCAmelCase__ ) else: raise ValueError( f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:" f" {array.shape}" ) logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" ) return model def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ): # Instantiate model logger.info(f"Loading model based on config from {config_path}..." ) __a : Dict = BertConfig.from_json_file(lowerCAmelCase__ ) __a : int = BertModel(lowerCAmelCase__ ) # Load weights from checkpoint logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." ) load_tfa_weights_in_bert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." ) torch.save(model.state_dict() , lowerCAmelCase__ ) if __name__ == "__main__": lowercase__ =argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) lowercase__ =parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
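A minimal driving sketch for the TF2-to-PyTorch converter above. All paths are hypothetical placeholders; the steps mirror the script's own __main__ block (BertConfig.from_json_file, BertModel, the TF2 weight loader, then torch.save), and the loader name is the one the script itself calls.

# Sketch only: mirrors the conversion steps above; all paths are hypothetical.
import torch
from transformers import BertConfig, BertModel

config = BertConfig.from_json_file("bert_config.json")   # hypothetical config path
model = BertModel(config)
# load_tfa_weights_in_bert(model, config, "model.ckpt")  # loader as named in the script above
torch.save(model.state_dict(), "pytorch_model.bin")      # hypothetical output path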
216
1
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = """Wav2Vec2FeatureExtractor""" SCREAMING_SNAKE_CASE_ : Tuple = """AutoTokenizer""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): super().__init__(_lowercase ,_lowercase ) _lowercase : int = self.feature_extractor _lowercase : List[Any] = False @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): try: return super().from_pretrained(_lowercase ,**_lowercase ) except OSError: warnings.warn( f"""Loading a tokenizer inside {cls.__name__} from a config that does not""" """ include a `tokenizer_class` attribute is deprecated and will be """ """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`""" """ attribute to either your `config.json` or `tokenizer_config.json` """ """file to suppress this warning: """ ,_lowercase ,) _lowercase : str = WavaVecaFeatureExtractor.from_pretrained(_lowercase ,**_lowercase ) _lowercase : Optional[int] = WavaVecaCTCTokenizer.from_pretrained(_lowercase ,**_lowercase ) return cls(feature_extractor=_lowercase ,tokenizer=_lowercase ) def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowercase ,**_lowercase ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" ) _lowercase : List[str] = kwargs.pop("""raw_speech""" ) else: _lowercase : Dict = kwargs.pop("""audio""" ,_lowercase ) _lowercase : List[str] = kwargs.pop("""sampling_rate""" ,_lowercase ) _lowercase : Optional[int] = kwargs.pop("""text""" ,_lowercase ) if len(_lowercase ) > 0: _lowercase : Tuple = args[0] _lowercase : str = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: _lowercase : Optional[Any] = self.feature_extractor(_lowercase ,*_lowercase ,sampling_rate=_lowercase ,**_lowercase ) if text is not None: _lowercase : Any = self.tokenizer(_lowercase ,**_lowercase ) if text is None: return inputs elif audio is None: return encodings else: _lowercase : Union[str, Any] = encodings["""input_ids"""] return inputs def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_lowercase ,**_lowercase ) _lowercase : Tuple = kwargs.pop("""input_features""" ,_lowercase ) _lowercase : Tuple = kwargs.pop("""labels""" ,_lowercase ) if len(_lowercase ) > 0: _lowercase : int = args[0] _lowercase : Dict = args[1:] if input_features is not None: _lowercase : str = self.feature_extractor.pad(_lowercase ,*_lowercase ,**_lowercase ) if labels is not None: _lowercase : List[Any] = self.tokenizer.pad(_lowercase ,**_lowercase ) if labels is None: return input_features elif input_features is None: return labels else: _lowercase : Union[str, Any] = labels["""input_ids"""] return input_features def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.batch_decode(*_lowercase ,**_lowercase ) def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.decode(*_lowercase ,**_lowercase ) @contextmanager def 
lowerCamelCase__ ( self ): warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) _lowercase : Optional[int] = True _lowercase : str = self.tokenizer yield _lowercase : Any = self.feature_extractor _lowercase : Optional[int] = False
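A short usage sketch for the Wav2Vec2 processor above. The checkpoint id and the dummy waveform are illustrative assumptions; the audio/sampling_rate/text keywords come from the __call__ implementation shown.

# Usage sketch; checkpoint id and waveform are illustrative assumptions.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")  # assumed checkpoint
speech = np.zeros(16000, dtype=np.float32)                 # 1 s of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids           # tokenized targets via `text`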
366
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : str = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _lowercase : Optional[int] = {"""unk_token""": """<unk>"""} _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) _lowercase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } _lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] _lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : List[Any] = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ ) _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) _lowercase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : int = self.prepare_image_inputs() _lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" ) _lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : List[Any] = """lower newer""" _lowercase : Any = processor(text=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : str = """lower newer""" _lowercase : List[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCamelCase__ ( self ): _lowercase : Dict = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : int = processor.batch_decode(UpperCAmelCase_ ) _lowercase : 
Tuple = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Optional[Any] = """lower newer""" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
336
0
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class _A : UpperCamelCase__ : Optional[Union[str, Path]] = None UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : Optional[Dict] = None UpperCamelCase__ : Optional[str] = None UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : bool = True UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : int = 1 UpperCamelCase__ : Optional[Union[str, bool]] = None UpperCamelCase__ : bool = False UpperCamelCase__ : Optional[Dict] = None UpperCamelCase__ : Optional[str] = None def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
49
'''simple docstring''' def solution( n: int = 1_00 ) -> int: sum_of_squares = n * (n + 1) * (2 * n + 1) / 6 square_of_sum = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'''{solution() = }''')
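A quick numeric check of the two closed forms used above, for n = 10: the sum of the squares is 385, the square of the sum is 3025, and their difference is 2640 (the classic Project Euler 6 example).

n = 10
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6   # 385
square_of_sum = (n * (n + 1) // 2) ** 2           # 3025
assert square_of_sum - sum_of_squares == 2640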
223
0
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class _snake_case : def __init__( self : Union[str, Any], __lowercase : Optional[int], __lowercase : Optional[int]=99, __lowercase : Tuple=13, __lowercase : Union[str, Any]=16, __lowercase : List[Any]=7, __lowercase : Union[str, Any]=True, __lowercase : int=True, __lowercase : Tuple=True, __lowercase : str=False, __lowercase : Optional[int]=True, __lowercase : str=2, __lowercase : Union[str, Any]=32, __lowercase : Any=4, __lowercase : Dict=4, __lowercase : Tuple=30, __lowercase : Union[str, Any]=0, __lowercase : List[Any]=1, __lowercase : Optional[int]=2, __lowercase : Union[str, Any]=None, ): lowercase__ = parent lowercase__ = batch_size lowercase__ = decoder_seq_length # For common tests lowercase__ = self.decoder_seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_model lowercase__ = decoder_layers lowercase__ = decoder_layers lowercase__ = decoder_ffn_dim lowercase__ = decoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = eos_token_id lowercase__ = bos_token_id lowercase__ = pad_token_id lowercase__ = decoder_start_token_id lowercase__ = use_cache lowercase__ = max_position_embeddings lowercase__ = None lowercase__ = decoder_seq_length lowercase__ = 2 lowercase__ = 1 def A__ ( self : Dict ): lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2 ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size ) lowercase__ = TrOCRConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, ) return (config, input_ids, attention_mask, lm_labels) def A__ ( self : str, __lowercase : Union[str, Any], __lowercase : Dict, __lowercase : Union[str, Any], __lowercase : List[str], ): lowercase__ = True lowercase__ = TrOCRDecoder(config=__lowercase ).to(__lowercase ).eval() lowercase__ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowercase__ = model(__lowercase, use_cache=__lowercase ) lowercase__ = model(__lowercase ) lowercase__ = model(__lowercase, use_cache=__lowercase ) self.parent.assertTrue(len(__lowercase ) == len(__lowercase ) ) self.parent.assertTrue(len(__lowercase ) == len(__lowercase ) + 1 ) lowercase__ = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids lowercase__ = ids_tensor((2, 1), config.vocab_size - 1 ) + 1 # append to next input_ids and lowercase__ = torch.cat([input_ids, next_tokens], dim=-1 ) lowercase__ = model(__lowercase 
)["last_hidden_state"] lowercase__ = model(__lowercase, past_key_values=__lowercase )["last_hidden_state"] # select random slice lowercase__ = ids_tensor((1,), output_from_past.shape[-1] ).item() lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowercase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__lowercase, __lowercase, atol=1e-3 ) def A__ ( self : Dict ): lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class _snake_case ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): UpperCamelCase__ : List[str] =(TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () UpperCamelCase__ : List[str] =(TrOCRForCausalLM,) if is_torch_available() else () UpperCamelCase__ : List[Any] ={"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} UpperCamelCase__ : int =True UpperCamelCase__ : List[Any] =False def A__ ( self : Dict ): lowercase__ = TrOCRStandaloneDecoderModelTester(self, is_training=__lowercase ) lowercase__ = ConfigTester(self, config_class=__lowercase ) def A__ ( self : List[str] ): pass def A__ ( self : List[Any] ): pass def A__ ( self : Union[str, Any] ): pass def A__ ( self : List[str] ): self.config_tester.run_common_tests() def A__ ( self : Optional[int] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__lowercase ) def A__ ( self : Optional[int] ): return @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :) def A__ ( self : int ): pass
224
from __future__ import annotations def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod()
224
1
from __future__ import annotations def encode( plain: str ) -> list[int]: return [ord(elem ) - 9_6 for elem in plain] def decode( encoded: list[int] ) -> str: return "".join(chr(elem + 9_6 ) for elem in encoded ) def main() -> None: encoded = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''' , encoded ) print('''Decoded:''' , decode(encoded ) ) if __name__ == "__main__": main()
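A round-trip check of the a1z26 mapping above ('a' -> 1 through 'z' -> 26), written standalone so it does not depend on the module.

assert [ord(ch) - 96 for ch in "hello"] == [8, 5, 12, 12, 15]
assert "".join(chr(n + 96) for n in [8, 5, 12, 12, 15]) == "hello"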
338
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowercase__ : Any = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str: lowerCAmelCase = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) lowercase__ : List[Any] = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = list(s_dict.keys() ) for key in keys: lowerCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ ) print(f"{key} -> {new_key}" ) lowerCAmelCase = s_dict.pop(snake_case__ ) return s_dict def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowerCAmelCase = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes: os.makedirs(snake_case__ , exist_ok=snake_case__ ) 
lowerCAmelCase = os.path.basename(snake_case__ ) lowerCAmelCase = url.split('''/''' )[-2] lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(snake_case__ ): lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop: while True: lowerCAmelCase = source.read(8_1_9_2 ) if not buffer: break output.write(snake_case__ ) loop.update(len(snake_case__ ) ) lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str: if ".pt" not in checkpoint_path: lowerCAmelCase = _download(_MODELS[checkpoint_path] ) else: lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' ) lowerCAmelCase = original_checkpoint['''dims'''] lowerCAmelCase = original_checkpoint['''model_state_dict'''] lowerCAmelCase = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(snake_case__ ) rename_keys(snake_case__ ) lowerCAmelCase = True lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowerCAmelCase = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ ) lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0 and not set(snake_case__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f" but all the following weights are missing {missing}" ) if tie_embeds: lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCAmelCase = proj_out_weights model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase__ : int = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
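A minimal driving sketch for the Whisper converter above. Passing one of the _MODELS keys (e.g. "tiny.en") instead of a local .pt file triggers the checksum-verified download path; the output directory is a hypothetical placeholder, and the entry point is the one invoked in the script's __main__ block.

# Sketch only: converts the OpenAI "tiny.en" checkpoint to a Transformers model.
convert_openai_whisper_to_tfms("tiny.en", "./whisper-tiny.en-hf")  # hypothetical output dir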
338
1
'''simple docstring''' import cv2 import numpy as np class HarrisCorner: def __init__( self , k: float , window_size: int ) -> None: if k in (0.0_4, 0.0_6): self.k = k self.window_size = window_size else: raise ValueError('invalid k value' ) def __str__( self ) -> str: return str(self.k ) def detect( self , img_path: str ) -> tuple[cv2.Mat, list[list[int]]]: img = cv2.imread(img_path , 0 ) h , w = img.shape corner_list = [] color_img = img.copy() color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB ) dy , dx = np.gradient(img ) ixx = dx**2 iyy = dy**2 ixy = dx * dy k = 0.0_4 offset = self.window_size // 2 for y in range(offset , h - offset ): for x in range(offset , w - offset ): wxx = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wyy = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wxy = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() det = (wxx * wyy) - (wxy**2) trace = wxx + wyy r = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_55 ) return color_img, corner_list if __name__ == "__main__": edge_detect = HarrisCorner(0.04, 3) color_img , corner_list = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img)
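The response computed above is the standard Harris measure R = det(M) - k * trace(M)**2 over each window, with det(M) = Wxx*Wyy - Wxy**2 and trace(M) = Wxx + Wyy. A tiny standalone check of the arithmetic with k = 0.04:

wxx, wyy, wxy, k = 4.0, 4.0, 1.0, 0.04
r = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2   # det - k * trace**2
assert round(r, 2) == 12.44                       # 15 - 0.04 * 64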
357
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class a ( unittest.TestCase ): def __UpperCAmelCase ( self ) -> int: _a = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(__magic_name__ ) ) def __UpperCAmelCase ( self ) -> Dict: _a = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(__magic_name__ ) ) def __UpperCAmelCase ( self ) -> Optional[int]: _a = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__magic_name__ ) ) def __UpperCAmelCase ( self ) -> Optional[Any]: _a = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] self.assertTrue(is_safetensors_compatible(__magic_name__ ) ) def __UpperCAmelCase ( self ) -> str: _a = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', # Removed: 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__magic_name__ ) ) def __UpperCAmelCase ( self ) -> List[Any]: _a = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = 'fp16' self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) ) def __UpperCAmelCase ( self ) -> Any: _a = [ 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = 'fp16' self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) ) def __UpperCAmelCase ( self ) -> int: # pass variant but use the non-variant filenames _a = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] _a = 'fp16' self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) ) def __UpperCAmelCase ( self ) -> List[str]: _a = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = 'fp16' self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) ) def __UpperCAmelCase ( self ) -> Union[str, Any]: _a = [ 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', ] _a = 'fp16' self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) ) def __UpperCAmelCase ( self ) -> List[Any]: # pass 
variant but use the non-variant filenames _a = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] _a = 'fp16' self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) ) def __UpperCAmelCase ( self ) -> List[Any]: _a = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', # 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = 'fp16' self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
104
0
from __future__ import annotations from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) c = 299792458 # Symbols ct , x , y , z = symbols("ct x y z") def beta( velocity: float ) -> float: """simple docstring""" if velocity > c: raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError("Speed must be greater than or equal to 1!" ) return velocity / c def gamma( velocity: float ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(velocity ) ** 2 ) def transformation_matrix( velocity: float ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0], [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def transform( velocity: float , event: np.ndarray | None = None ) -> np.ndarray: """simple docstring""" if event is None: event = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(velocity ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: four_vector = transform(29979245) print("Example of four vector: ") print(F"""ct' = {four_vector[0]}""") print(F"""x' = {four_vector[1]}""") print(F"""y' = {four_vector[2]}""") print(F"""z' = {four_vector[3]}""") # Substitute symbols with numerical values sub_dict = {ct: c, x: 1, y: 1, z: 1} numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)] print(F"""\n{numerical_vector}""")
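A worked numeric check of the kinematics above, standalone so it mirrors beta() and gamma() without importing the module: at half the speed of light, beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) = 2 / sqrt(3), roughly 1.1547.

from math import isclose, sqrt

c = 299792458
v = c / 2
assert v / c == 0.5                                       # beta at half light speed
assert isclose(1 / sqrt(1 - (v / c) ** 2), 2 / sqrt(3))   # gamma, about 1.1547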
227
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def data_handling( data: dict ) -> tuple: """simple docstring""" return (data["data"], data["target"]) def xgboost( features: np.ndarray , target: np.ndarray ) -> XGBClassifier: """simple docstring""" classifier = XGBClassifier() classifier.fit(features , target ) return classifier def main() -> None: """simple docstring""" iris = load_iris() features , targets = data_handling(iris ) x_train , x_test , y_train , y_test = train_test_split( features , targets , test_size=0.25 ) names = iris["target_names"] # Create an XGBoost Classifier from the training data classifier = xgboost(x_train , y_train ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , ) plt.title("Normalized Confusion Matrix - IRIS Dataset" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
227
1
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=1_3 , UpperCamelCase__ : Optional[Any]=7 , UpperCamelCase__ : int=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : Any=3_3 , UpperCamelCase__ : Union[str, Any]=3_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Dict=3_7 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[Any]=5_1_2 , UpperCamelCase__ : Any=1_6 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[Any]=0.0_2 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Optional[int]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Dict ): """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def A ( self : Union[str, Any] , UpperCamelCase__ : 
Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = EsmModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = EsmForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = EsmForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = () _SCREAMING_SNAKE_CASE = ( { """feature-extraction""": EsmModel, """fill-mask""": EsmForMaskedLM, """text-classification""": EsmForSequenceClassification, """token-classification""": EsmForTokenClassification, """zero-shot""": EsmForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True def A ( self : int ): """simple docstring""" UpperCamelCase = EsmModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def A ( self : Tuple ): """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = EsmModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs()[0] UpperCamelCase = EsmEmbeddings(config=UpperCamelCase__ ) UpperCamelCase = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] ) UpperCamelCase = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) UpperCamelCase = create_position_ids_from_input_ids(UpperCamelCase__ , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) ) def A ( self : int ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs()[0] UpperCamelCase = EsmEmbeddings(config=UpperCamelCase__ ) UpperCamelCase = torch.empty(2 , 4 , 3_0 ) UpperCamelCase = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] UpperCamelCase = torch.as_tensor([expected_single_positions, expected_single_positions] ) UpperCamelCase = embeddings.create_position_ids_from_inputs_embeds(UpperCamelCase__ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def A ( self : Dict ): """simple docstring""" pass @unittest.skip('Esm does not support embedding resizing' ) def A ( self : str ): """simple docstring""" pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def A ( self : Optional[Any] ): """simple docstring""" pass @require_torch class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" @slow def A ( self : Any ): """simple docstring""" with torch.no_grad(): UpperCamelCase = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 3_3 UpperCamelCase = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : Union[str, Any] ): """simple docstring""" with torch.no_grad(): UpperCamelCase = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() UpperCamelCase = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) UpperCamelCase = model(UpperCamelCase__ )[0] # compare the actual values for a slice. UpperCamelCase = torch.tensor( [[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
249
'''simple docstring''' def combination_util( arr , n , r , index , data , i ) -> None: """simple docstring""" if index == r: for j in range(r ): print(data[j] , end=' ' ) print(' ' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location data[index] = arr[i] combination_util(arr , n , r , index + 1 , data , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(arr , n , r , index , data , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def print_combination( arr , n , r ) -> None: """simple docstring""" # A temporary array to store all combination one by one data = [0] * r # Print all combination using temporary array 'data[]' combination_util(arr , n , r , 0 , data , 0 ) if __name__ == "__main__": # Driver code to check the function above arr = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
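Expected behaviour of the driver above: choosing r = 3 out of the 5 elements prints all C(5, 3) = 10 combinations, starting with 10 20 30, then 10 20 40, and so on. itertools provides an easy cross-check:

from itertools import combinations

combos = list(combinations([10, 20, 30, 40, 50], 3))
assert len(combos) == 10
assert combos[0] == (10, 20, 30)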
249
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor logger = logging.get_logger(__name__) class LayoutLMvaFeatureExtractor( LayoutLMvaImageProcessor ): def __init__( self , *args , **kwargs ) -> None: '''simple docstring''' warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , FutureWarning , ) super().__init__(*args , **kwargs )
34
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowercase ( __UpperCAmelCase): __lowerCAmelCase : str = ["""image_processor""", """tokenizer"""] __lowerCAmelCase : Optional[Any] = """OwlViTImageProcessor""" __lowerCAmelCase : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self : Union[str, Any] , _lowerCamelCase : str=None , _lowerCamelCase : Tuple=None , **_lowerCamelCase : List[Any] ): """simple docstring""" A_ : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _lowerCamelCase , ) A_ : List[Any] = kwargs.pop('''feature_extractor''' ) A_ : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_lowerCamelCase , _lowerCamelCase ) def __call__( self : Optional[int] , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=None , _lowerCamelCase : str="max_length" , _lowerCamelCase : List[Any]="np" , **_lowerCamelCase : Optional[int] ): """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(_lowerCamelCase , _lowerCamelCase ) or (isinstance(_lowerCamelCase , _lowerCamelCase ) and not isinstance(text[0] , _lowerCamelCase )): A_ : List[str] = [self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )] elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(text[0] , _lowerCamelCase ): A_ : Optional[int] = [] # Maximum number of queries across batch A_ : Any = max([len(_lowerCamelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_lowerCamelCase ) != max_num_queries: A_ : Optional[int] = t + [''' '''] * (max_num_queries - len(_lowerCamelCase )) A_ : Tuple = self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) encodings.append(_lowerCamelCase ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": A_ : Union[str, Any] = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) A_ : Dict = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A_ : List[Any] = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) A_ : Any = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A_ : Any = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) A_ : Union[str, Any] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A_ : Tuple = tf.stack([encoding['''input_ids'''] for encoding in encodings] , 
axis=0 ) A_ : str = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) A_ : Any = BatchEncoding() A_ : Optional[Any] = input_ids A_ : str = attention_mask if query_images is not None: A_ : Union[str, Any] = BatchEncoding() A_ : Optional[Any] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ).pixel_values A_ : Dict = query_pixel_values if images is not None: A_ : int = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) if text is not None and images is not None: A_ : str = image_features.pixel_values return encoding elif query_images is not None and images is not None: A_ : Union[str, Any] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase ) def a_ ( self : Optional[Any] , *_lowerCamelCase : int , **_lowerCamelCase : Dict ): """simple docstring""" return self.image_processor.post_process(*_lowerCamelCase , **_lowerCamelCase ) def a_ ( self : Optional[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Dict ): """simple docstring""" return self.image_processor.post_process_object_detection(*_lowerCamelCase , **_lowerCamelCase ) def a_ ( self : List[Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return self.image_processor.post_process_image_guided_detection(*_lowerCamelCase , **_lowerCamelCase ) def a_ ( self : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def a_ ( self : Dict , *_lowerCamelCase : Any , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @property def a_ ( self : List[str] ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _lowerCamelCase , ) return self.image_processor_class @property def a_ ( self : Any ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _lowerCamelCase , ) return self.image_processor
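A usage sketch for the OwlViT processor above. The checkpoint id and the blank image are illustrative assumptions; the nested text list exercises the per-batch query padding implemented in __call__.

# Usage sketch; checkpoint id and image are illustrative assumptions.
import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")  # assumed checkpoint
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]],  # one batch entry, two queries
    images=image,
    return_tensors="pt",
)  # -> input_ids, attention_mask, pixel_values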
167
0
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """    \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        # Remove the temporary copy of the source tree created in setUp.
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
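# For context, a minimal sketch of the kind of file the tests above feed to the
# checker. The contents below are hypothetical; only the `# Copied from ...`
# comment convention is taken from the tests themselves.
from typing import Optional

import torch
from torch import nn


# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
class TestSchedulerOutput(nn.Module):
    # check_copies re-blackens the file and diffs this body against the DDPM
    # original, applying the DDPM->Test rename declared in the comment above.
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None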
229
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : '''simple docstring''' def __init__( self , _lowercase , _lowercase=13 , _lowercase=32 , _lowercase=3 , _lowercase=4 , _lowercase=[10, 20, 30, 40] , _lowercase=[2, 2, 3, 2] , _lowercase=True , _lowercase=True , _lowercase=37 , _lowercase="gelu" , _lowercase=10 , _lowercase=0.02 , _lowercase=["stage2", "stage3", "stage4"] , _lowercase=3 , _lowercase=None , ): """simple docstring""" _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_stages _lowerCAmelCase = hidden_sizes _lowerCAmelCase = depths _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = out_features _lowerCAmelCase = num_labels _lowerCAmelCase = scope _lowerCAmelCase = num_stages def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def _lowercase ( self ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _lowercase ( self ): """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowercase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowercase , loss_ignore_index=255 , num_labels=self.num_labels , ) def _lowercase ( self , _lowercase , _lowercase , _lowercase ): """simple docstring""" _lowerCAmelCase = UperNetForSemanticSegmentation(config=_lowercase ) model.to(_lowercase ) model.eval() _lowerCAmelCase = model(_lowercase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple 
docstring''' _lowercase : Dict = (UperNetForSemanticSegmentation,) if is_torch_available() else () _lowercase : Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {} _lowercase : Dict = False _lowercase : Optional[Any] = False _lowercase : List[str] = False _lowercase : Union[str, Any] = False _lowercase : List[str] = False _lowercase : List[Any] = False def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = UperNetModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 ) def _lowercase ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self ): """simple docstring""" return def _lowercase ( self ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(_lowercase ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowercase ) def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _lowercase ( self ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" def check_hidden_states_output(_lowercase , _lowercase , _lowercase ): _lowerCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) _lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase = self.model_tester.num_stages self.assertEqual(len(_lowercase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = True 
check_hidden_states_output(_lowercase , _lowercase , _lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(_lowercase , _lowercase , _lowercase ) def _lowercase ( self ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = _config_zero_init(_lowercase ) _lowerCAmelCase = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _lowerCAmelCase = model_class(config=_lowercase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def _lowercase ( self ): """simple docstring""" pass @slow def _lowercase ( self ): """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) def A (): _lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _lowerCAmelCase = Image.open(__lowerCamelCase ).convert("""RGB""" ) return image @require_torch @require_vision @slow class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_lowercase ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase ) with torch.no_grad(): _lowerCAmelCase = model(**_lowercase ) _lowerCAmelCase = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _lowercase ) _lowerCAmelCase = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1e-4 ) ) def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_lowercase ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase ) with torch.no_grad(): _lowerCAmelCase = model(**_lowercase ) _lowerCAmelCase = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _lowercase ) _lowerCAmelCase = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1e-4 ) )
229
1
def nor_gate(input_1: int, input_2: int) -> int:
    """
    NOR outputs 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |   {nor_gate(0, 0)}    |")
    print(f"|       0 |       1 |   {nor_gate(0, 1)}    |")
    print(f"|       1 |       0 |   {nor_gate(1, 0)}    |")
    print(f"|       1 |       1 |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
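# NOR is functionally complete, so the other basic gates can be built from it.
# A small illustration on top of `nor_gate` above (these helpers are an
# editor's sketch, not part of the original snippet):
def not_gate(a: int) -> int:
    return nor_gate(a, a)  # NOT(a) == NOR(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))  # OR(a, b) == NOT(NOR(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))  # AND(a, b) == NOR(NOT a, NOT b)


assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]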
145
'''simple docstring''' from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __a = logging.get_logger(__name__) # pylint: disable=invalid-name class A__ ( UpperCamelCase ): """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : CLIPSegForImageSegmentation , lowerCAmelCase__ : CLIPSegProcessor , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase__ : StableDiffusionSafetyChecker , lowerCAmelCase__ : CLIPImageProcessor , ) -> Dict: """simple docstring""" super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: _UpperCAmelCase : str = ( F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ ) _UpperCAmelCase : Any = dict(scheduler.config ) _UpperCAmelCase : Tuple = 1 _UpperCAmelCase : Optional[Any] = FrozenDict(lowerCAmelCase__ ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: _UpperCAmelCase : Union[str, Any] = ( F"""The configuration file of this scheduler: {scheduler} has not set the configuration""" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = dict(scheduler.config ) _UpperCAmelCase : Optional[Any] = True _UpperCAmelCase : Dict = FrozenDict(lowerCAmelCase__ ) if safety_checker is None: logger.warning( F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( segmentation_model=lowerCAmelCase__ , segmentation_processor=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Optional[int]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCAmelCase : Tuple = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__ ) def _lowerCAmelCase ( self : Optional[int] ) -> Any: """simple docstring""" self.enable_attention_slicing(lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) _UpperCAmelCase : Dict = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(lowerCAmelCase__ , lowerCAmelCase__ ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCAmelCase__ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self : Optional[int] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase__ : str , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : Any , ) -> Tuple: """simple docstring""" _UpperCAmelCase : List[str] = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) _UpperCAmelCase : List[Any] = self.segmentation_model(**lowerCAmelCase__ ) _UpperCAmelCase : Any = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() _UpperCAmelCase : str = self.numpy_to_pil(lowerCAmelCase__ )[0].resize(image.size ) # Run inpainting pipeline with the generated mask _UpperCAmelCase : Union[str, Any] = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , 
unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , )
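# A usage sketch for the pipeline above, assuming it is loaded as a diffusers
# community pipeline. The checkpoint ids and file name are illustrative
# assumptions, not taken from the record.
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=seg_model,
    segmentation_processor=seg_processor,
)

image = Image.open("dog_on_bench.png").convert("RGB")
# `text` selects the region to mask via CLIPSeg; `prompt` describes the replacement.
result = pipe(prompt="a cat sitting on a bench", image=image, text="a dog").images[0]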
145
1
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, seeded with 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's totient via the product formula: phi(n) = n * prod(1 - 1/p).
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
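# A cross-check on the sieve-based totient: a brute-force coprime count for
# small n (editor's sketch; `phi_brute_force` is not part of the solution).
from math import gcd


def phi_brute_force(n: int) -> int:
    # phi(n) = number of integers in [1, n] that are coprime to n
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


assert [phi_brute_force(n) for n in range(1, 9)] == [1, 1, 2, 2, 4, 2, 6, 4]
print(solution(8))  # expected 21, the worked example in Project Euler problem 72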
33
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
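# What the lazy-module pattern above buys, in brief: importing `transformers`
# does not execute this package's submodules; the first attribute access does.
# A sketch (the deferred-loading claim is qualitative, not measured):
import transformers

processor_cls = transformers.InstructBlipProcessor  # loads processing code on access
model_cls = transformers.InstructBlipForConditionalGeneration  # pulls in the torch modeling file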
33
1
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Check windows of len(qs) in ks at each starting position.
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
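# To make the rule matching concrete: `_match` slides the rule tuple over the
# flattened parameter key and requires every regex to match fully. The key
# below is an illustrative assumption.
key = ("transformer", "h", "0", "attention", "q_proj", "kernel")
assert _match(("attention", "(q_proj|k_proj|v_proj)", "kernel"), key)
assert not _match(("mlp", "c_fc", "kernel"), key)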
346
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
**_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( 
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
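# For orientation, a minimal PNDM sampling loop of the shape the `full_loop`
# helper above builds. The toy denoiser is a stand-in assumption; the scheduler
# attributes and step methods are the ones exercised by the tests.
import torch
from diffusers import PNDMScheduler


def toy_model(sample, t):
    return 0.1 * sample  # any tensor-to-tensor map works for a shape check


scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up steps
    sample = scheduler.step_prk(toy_model(sample, t), t, sample).prev_sample
for t in scheduler.plms_timesteps:  # linear multistep phase
    sample = scheduler.step_plms(toy_model(sample, t), t, sample).prev_sample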
346
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : str = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _lowercase : Optional[int] = {"""unk_token""": """<unk>"""} _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) _lowercase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } _lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] _lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : List[Any] = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ ) _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) _lowercase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : int = self.prepare_image_inputs() _lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" ) _lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : List[Any] = """lower newer""" _lowercase : Any = processor(text=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : str = """lower newer""" _lowercase : List[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCamelCase__ ( self ): _lowercase : Dict = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : int = processor.batch_decode(UpperCAmelCase_ ) _lowercase : 
Tuple = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Optional[Any] = """lower newer""" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCAmelCase: Any = generate_large_matrix() UpperCAmelCase: Dict = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid ) assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 _lowercase : List[Any] = len(__UpperCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _lowercase : Tuple = (left + right) // 2 _lowercase : List[Any] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _lowercase : Dict = mid + 1 else: _lowercase : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Any = 0 _lowercase : Optional[int] = len(grid[0] ) for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(__UpperCAmelCase ) * len(grid[0] )) - total def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return len([number for row in grid for number in row if number < 0] ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 for row in grid: for i, number in enumerate(__UpperCAmelCase ): if number < 0: total += len(__UpperCAmelCase ) - i break return total def __SCREAMING_SNAKE_CASE ( ): from timeit import timeit print("""Running benchmarks""" ) _lowercase : Tuple = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
336
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
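# A short usage sketch; values checked are the defaults from the signature
# above, and the task name follows the convention in the `inputs` property.
config = BigBirdConfig()
assert config.attention_type == "block_sparse" and config.block_size == 64

onnx_config = BigBirdOnnxConfig(config, task="multiple-choice")
assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "choice", 2: "sequence"}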
205
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
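# Invocation sketch (the script name, run id, and durations below are made up
# for illustration; the output shape follows the print loop above):
#
#   python get_github_job_time.py --workflow_run_id 1234567890
#
# prints one "<job name>: <minutes>" line per job, longest first, e.g.
#
#   run_tests_torch_gpu: 112
#   run_tests_tf_gpu: 87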
205
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ =logging.get_logger(__name__) UpperCamelCase__ ={'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase__ ={ 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, } UpperCamelCase__ ={ 'moussaKam/mbarthez': 1024, 'moussaKam/barthez': 1024, 'moussaKam/barthez-orangesum-title': 1024, } UpperCamelCase__ ='▁' class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = ['input_ids', 'attention_mask'] def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _SCREAMING_SNAKE_CASE : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token _SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_file _SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Tuple = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} _SCREAMING_SNAKE_CASE : Optional[Any] = len(self.sp_model ) - 1 _SCREAMING_SNAKE_CASE : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] _SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]: _SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id] _SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] 
@property def UpperCamelCase_ ( self ) -> Optional[Any]: return len(self.sp_model ) def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _SCREAMING_SNAKE_CASE : Dict = self.sp_model.PieceToId(__lowerCamelCase ) return spm_id if spm_id else self.unk_token_id def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : Tuple = "" _SCREAMING_SNAKE_CASE : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCamelCase ) + token _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : str = [] else: current_sub_tokens.append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def __getstate__( self ) -> int: _SCREAMING_SNAKE_CASE : List[Any] = self.__dict__.copy() _SCREAMING_SNAKE_CASE : Union[str, Any] = None return state def __setstate__( self , __lowerCamelCase ) -> Optional[int]: _SCREAMING_SNAKE_CASE : List[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _SCREAMING_SNAKE_CASE : Dict = {} _SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _SCREAMING_SNAKE_CASE : Tuple = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
325
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ =logging.get_logger(__name__) UpperCamelCase__ ={'vocab_file': 'spiece.model'} UpperCamelCase__ ={ 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', } } UpperCamelCase__ ={ 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } UpperCamelCase__ ='▁' class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_SCREAMING_SNAKE_CASE : List[Any] = ( AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token ) _SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : Dict = do_lower_case _SCREAMING_SNAKE_CASE : List[Any] = remove_space _SCREAMING_SNAKE_CASE : str = keep_accents _SCREAMING_SNAKE_CASE : Optional[int] = vocab_file _SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def UpperCamelCase_ ( self ) -> Optional[Any]: return len(self.sp_model ) def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : str = self.__dict__.copy() _SCREAMING_SNAKE_CASE : Optional[Any] = None return state def __setstate__( self , __lowerCamelCase ) -> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]: if self.remove_space: _SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = inputs _SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: _SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] ) if self.do_lower_case: _SCREAMING_SNAKE_CASE : Dict = outputs.lower() return outputs def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]: _SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = [] for piece in pieces: if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:] else: _SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCamelCase ) else: new_pieces.append(__lowerCamelCase ) return new_pieces def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]: return self.sp_model.PieceToId(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> str: return self.sp_model.IdToPiece(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : List[str] = "" 
_SCREAMING_SNAKE_CASE : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCamelCase ) + token _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]: _SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] _SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is not None: return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]: _SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id] _SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _SCREAMING_SNAKE_CASE : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: _SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
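# Illustrative sketch: a standalone re-implementation of the `preprocess_text`
# normalization used by the ALBERT-style SentencePiece tokenizer above --
# collapse whitespace, normalize LaTeX-style quote pairs, optionally strip
# accents via NFKD, optionally lowercase. Parameter names are assumptions
# chosen for readability, not taken from the row above.
import unicodedata

def preprocess_text(text: str, remove_space: bool = True,
                    keep_accents: bool = False, do_lower_case: bool = True) -> str:
    if remove_space:
        text = " ".join(text.strip().split())  # collapse runs of whitespace
    text = text.replace("``", '"').replace("''", '"')  # normalize quote pairs
    if not keep_accents:
        text = unicodedata.normalize("NFKD", text)
        text = "".join(c for c in text if not unicodedata.combining(c))  # drop accents
    if do_lower_case:
        text = text.lower()
    return text

# Accents are decomposed and dropped, quote pairs become plain double quotes:
assert preprocess_text("  Héllo ``world''  ") == 'hello "world"'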
325
1
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("fixtures/test_sentencepiece.model") a : str = {"target_lang": "fi", "source_lang": "en"} a : Tuple = ">>zh<<" a : int = "Helsinki-NLP/" if is_torch_available(): a : Any = "pt" elif is_tf_available(): a : List[str] = "tf" else: a : Optional[Any] = "jax" @require_sentencepiece class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MarianTokenizer SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Tuple = True def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : List[str] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] UpperCAmelCase : str = dict(zip(snake_case , range(len(snake_case ) ) ) ) UpperCAmelCase : Union[str, Any] = Path(self.tmpdirname ) save_json(snake_case , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(snake_case , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(snake_case , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(snake_case , save_dir / VOCAB_FILES_NAMES["target_spm"] ) UpperCAmelCase : Union[str, Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self , **snake_case ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return ( "This is a test", "This is a test", ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = "</s>" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(snake_case ) , 9 ) def A_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) UpperCAmelCase : int = en_de_tokenizer(["I am a small frog"] , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) UpperCAmelCase : List[Any] = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0] self.assertListEqual(snake_case , batch.input_ids[0] ) UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(snake_case ) UpperCAmelCase : Union[str, Any] = [x.name for x in Path(snake_case ).glob("*" )] self.assertIn("source.spm" , snake_case ) MarianTokenizer.from_pretrained(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_tokenizer() UpperCAmelCase : Union[str, Any] = tok( ["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=snake_case , 
truncation=snake_case , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[str] = tok(["I am a tiny frog", "I am a small frog"] , padding=snake_case , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) UpperCAmelCase : Optional[Any] = "Tämä on testi" UpperCAmelCase : List[Any] = "This is a test" UpperCAmelCase : Tuple = [7_6, 7, 2_0_4_7, 2] UpperCAmelCase : int = [6_9, 1_2, 1_1, 9_4_0, 2] UpperCAmelCase : Any = tokenizer(snake_case ).input_ids self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = tokenizer(text_target=snake_case ).input_ids self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : str = tokenizer.decode(snake_case , skip_special_tokens=snake_case ) self.assertEqual(snake_case , snake_case )
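# Illustrative sketch: the Marian test fixture above materializes a tiny
# vocabulary on disk before each test. A stdlib-only sketch of the round trip
# that `save_json` and the tokenizer loader perform (the real helper lives in
# transformers.models.marian.tokenization_marian):
import json, os, tempfile

vocab = {"</s>": 0, "<unk>": 1, "▁This": 2, "▁is": 3, "▁a": 4, "▁t": 5, "est": 6, "<pad>": 7}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.json")
    with open(path, "w", encoding="utf-8") as f:
        json.dump(vocab, f, indent=2, ensure_ascii=False)
    with open(path, encoding="utf-8") as f:
        assert json.load(f) == vocab  # the mapping round-trips losslessly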
311
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
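# Illustrative sketch: the accelerate tests above launch helper scripts with
# `torchrun` while temporarily patching environment variables. A stdlib-only
# approximation of that pattern (`patched_env` is a local stand-in for
# accelerate's `patch_environment`; the `-c "pass"` payload stands in for a
# real test script):
import os, subprocess, sys
from contextlib import contextmanager

@contextmanager
def patched_env(**overrides):
    saved = {k: os.environ.get(k) for k in overrides}
    os.environ.update({k: str(v) for k, v in overrides.items()})
    try:
        yield
    finally:
        for k, v in saved.items():
            if v is None:
                os.environ.pop(k, None)  # variable did not exist before
            else:
                os.environ[k] = v        # restore the previous value

with patched_env(OMP_NUM_THREADS=1):
    subprocess.run([sys.executable, "-c", "pass"], check=True, env=os.environ.copy())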
311
1
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : def __init__( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : Any=3_2 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Union[str, Any]=1_6 , UpperCAmelCase__ : List[Any]=[1, 2, 1] , UpperCAmelCase__ : int=[2, 2, 4] , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[Any]=2.0 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : int=1E-5 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[Any]=1_0 , UpperCAmelCase__ : Tuple=8 , UpperCAmelCase__ : str=["stage1", "stage2", "stage3"] , UpperCAmelCase__ : Tuple=[1, 2, 3] , ) -> Dict: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = depths lowerCAmelCase = num_heads lowerCAmelCase = window_size lowerCAmelCase = mlp_ratio lowerCAmelCase = qkv_bias lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = hidden_act lowerCAmelCase = use_absolute_embeddings lowerCAmelCase = patch_norm lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range lowerCAmelCase = is_training lowerCAmelCase = scope lowerCAmelCase = use_labels lowerCAmelCase = type_sequence_label_size lowerCAmelCase = encoder_stride lowerCAmelCase = out_features lowerCAmelCase = out_indices def __UpperCAmelCase ( self : str ) -> Optional[Any]: lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , 
out_features=self.out_features , out_indices=self.out_indices , ) def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any: lowerCAmelCase = MaskFormerSwinModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase = model(_lowerCamelCase ) lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] ) -> Dict: lowerCAmelCase = MaskFormerSwinBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(_lowerCamelCase ): lowerCAmelCase = ['''stem'''] lowerCAmelCase = MaskFormerSwinBackbone(config=_lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase = config_and_inputs lowerCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( a__ , a__ , unittest.TestCase ): lowerCamelCase : Any = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase : int = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase : Tuple = False lowerCamelCase : str = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = False lowerCamelCase : Optional[Any] = False def __UpperCAmelCase ( self : Any ) -> Tuple: lowerCAmelCase = MaskFormerSwinModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: pass def __UpperCAmelCase ( self : int ) -> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase ( self : Optional[int] ) -> Any: return def __UpperCAmelCase ( self : Dict ) -> str: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) @unittest.skip('Swin does not use inputs_embeds' ) def __UpperCAmelCase ( self : Tuple ) -> Any: pass @unittest.skip('Swin does not support 
feedforward chunking' ) def __UpperCAmelCase ( self : str ) -> int: pass def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def __UpperCAmelCase ( self : Dict ) -> List[Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_lowerCamelCase ) lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase = [*signature.parameters.keys()] lowerCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def __UpperCAmelCase ( self : Optional[int] ) -> Dict: pass def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ) -> str: lowerCAmelCase = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowerCAmelCase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowerCAmelCase = outputs.hidden_states lowerCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) # Swin has a different seq_length lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __UpperCAmelCase ( self : Dict ) -> Dict: lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ) -> int: lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % 
patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase = True self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def __UpperCAmelCase ( self : Dict ) -> Tuple: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def __UpperCAmelCase ( self : Optional[Any] ) -> int: pass def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(UpperCAmelCase__ : Optional[int] ): lowerCAmelCase = 0 return t def check_equivalence(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any]={} ): with torch.no_grad(): lowerCAmelCase = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ) lowerCAmelCase = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple() def recursive_check(UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] ): if isinstance(_lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ): recursive_check(_lowerCamelCase , _lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_lowerCamelCase , _lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_lowerCamelCase ) , set_nan_tensor_to_zero(_lowerCamelCase ) , atol=1E-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. 
Dict has''' F''' `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}.''' ) , ) recursive_check(_lowerCamelCase , _lowerCamelCase ) for model_class in self.all_model_classes: lowerCAmelCase = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'output_hidden_states': True} ) lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) lowerCAmelCase = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , a__ ): lowerCamelCase : str = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase : str = MaskFormerSwinConfig def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: lowerCAmelCase = MaskFormerSwinModelTester(self ) def __UpperCAmelCase ( self : Dict ) -> List[str]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = inputs_dict['''pixel_values'''].shape[0] for backbone_class in self.all_model_classes: lowerCAmelCase = backbone_class(_lowerCamelCase ) backbone.to(_lowerCamelCase ) backbone.eval() lowerCAmelCase = backbone(**_lowerCamelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _lowerCamelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowerCAmelCase = backbone(**_lowerCamelCase , output_hidden_states=_lowerCamelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowerCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowerCAmelCase = backbone(**_lowerCamelCase , output_attentions=_lowerCamelCase ) self.assertIsNotNone(outputs.attentions )
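# Illustrative sketch: `check_equivalence` above asserts that model outputs
# match whether they come back as a tuple or a dict. A dependency-free sketch
# of that recursive comparison over nested containers (tensors replaced by
# plain floats to keep it self-contained):
import math

def recursive_close(a, b, tol=1e-5):
    if isinstance(a, dict):
        a = tuple(a.values())
    if isinstance(b, dict):
        b = tuple(b.values())
    if isinstance(a, (list, tuple)):
        return len(a) == len(b) and all(recursive_close(x, y, tol) for x, y in zip(a, b))
    if a is None or b is None:
        return a is b
    return math.isclose(a, b, abs_tol=tol)

assert recursive_close((1.0, [2.0, 3.0]), {"x": 1.0, "y": [2.0, 3.0 + 1e-7]})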
369
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int: lowerCAmelCase = jnp.ones((batch_size, length) ) / length return scores def __UpperCAmelCase ( self : Optional[Any] ) -> str: lowerCAmelCase = None lowerCAmelCase = 2_0 lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase__ ) # tweak scores to not be uniform anymore lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCAmelCase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCAmelCase = jax.nn.softmax(UpperCAmelCase__ , axis=-1 ) lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 ) lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Any: lowerCAmelCase = None lowerCAmelCase = 1_0 lowerCAmelCase = 2 # create ramp distribution lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCAmelCase = FlaxTopKLogitsWarper(3 ) lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCAmelCase = 5 lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, length) ).copy() lowerCAmelCase = top_k_warp_safety_check(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def __UpperCAmelCase ( self : Any ) -> str: lowerCAmelCase = None lowerCAmelCase = 1_0 lowerCAmelCase = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 
0.3, 0.25]] ) ) lowerCAmelCase = FlaxTopPLogitsWarper(0.8 ) lowerCAmelCase = np.exp(top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCAmelCase = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def __UpperCAmelCase ( self : int ) -> Union[str, Any]: lowerCAmelCase = 2_0 lowerCAmelCase = 4 lowerCAmelCase = 0 lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ ) # check that min length is applied at length 5 lowerCAmelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 ) lowerCAmelCase = 5 lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] ) # check that min length is not applied anymore at length 15 lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = 1_5 lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() ) def __UpperCAmelCase ( self : Dict ) -> str: lowerCAmelCase = 2_0 lowerCAmelCase = 4 lowerCAmelCase = 0 lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ ) # check that all scores are -inf except the bos_token_id score lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 ) lowerCAmelCase = 1 lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCAmelCase = 3 lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Any: lowerCAmelCase = 2_0 lowerCAmelCase = 4 lowerCAmelCase = 0 lowerCAmelCase = 5 lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 ) lowerCAmelCase = 4 lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = 
logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCAmelCase = 3 lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() ) def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: lowerCAmelCase = 4 lowerCAmelCase = 1_0 lowerCAmelCase = 1_5 lowerCAmelCase = 2 lowerCAmelCase = 1 lowerCAmelCase = 1_5 # dummy input_ids and scores lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ ) lowerCAmelCase = input_ids.copy() lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scores.copy() # instantiate all dist processors lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase = FlaxTopKLogitsWarper(3 ) lowerCAmelCase = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ ) lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ ) lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ ) lowerCAmelCase = 1_0 # no processor list lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) # with processor list lowerCAmelCase = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) # scores should be equal self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def __UpperCAmelCase ( self : int ) -> List[str]: lowerCAmelCase = 4 lowerCAmelCase = 1_0 lowerCAmelCase = 1_5 lowerCAmelCase = 2 lowerCAmelCase = 1 lowerCAmelCase = 1_5 # dummy input_ids and scores lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ ) lowerCAmelCase = input_ids.copy() lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scores.copy() # instantiate all dist processors lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase = FlaxTopKLogitsWarper(3 ) lowerCAmelCase = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ ) lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ ) lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ ) lowerCAmelCase = 1_0 # no processor 
list def run_no_processor_list(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ): lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) return scores # with processor list def run_processor_list(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ): lowerCAmelCase = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) return scores lowerCAmelCase = jax.jit(UpperCAmelCase__ ) lowerCAmelCase = jax.jit(UpperCAmelCase__ ) lowerCAmelCase = jitted_run_no_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = jitted_run_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # scores should be equal self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
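# Illustrative sketch: minimal NumPy versions of the two simplest warpers the
# tests above exercise, to make the expected behaviour concrete. These mirror
# the semantics of FlaxTemperatureLogitsWarper and FlaxTopKLogitsWarper but
# are not the library implementations:
import numpy as np

def temperature_warp(scores, temperature):
    return scores / temperature  # <1 sharpens the softmax, >1 smooths it

def top_k_warp(scores, k, filter_value=-np.inf):
    out = np.full_like(scores, filter_value)
    idx = np.argsort(scores, axis=-1)[:, -k:]  # indices of the k largest logits
    np.put_along_axis(out, idx, np.take_along_axis(scores, idx, axis=-1), axis=-1)
    return out

scores = np.array([[0.1, 0.4, 0.2, 0.3]])
assert np.isinf(top_k_warp(scores, k=2)).sum() == 2  # all but the top 2 masked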
55
0
from __future__ import annotations UpperCAmelCase_ : Union[str, Any] = tuple[int, int, int] UpperCAmelCase_ : Optional[Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase UpperCAmelCase_ : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' # -------------------------- default selection -------------------------- # rotors -------------------------- UpperCAmelCase_ : Any = '''EGZWVONAHDCLFQMSIPJBYUKXTR''' UpperCAmelCase_ : Any = '''FOBHMDKEXQNRAULPGSJVTYICZW''' UpperCAmelCase_ : Union[str, Any] = '''ZJXESIUQLHAVRMDOYGTNFWPBKC''' # reflector -------------------------- UpperCAmelCase_ : Any = { '''A''': '''N''', '''N''': '''A''', '''B''': '''O''', '''O''': '''B''', '''C''': '''P''', '''P''': '''C''', '''D''': '''Q''', '''Q''': '''D''', '''E''': '''R''', '''R''': '''E''', '''F''': '''S''', '''S''': '''F''', '''G''': '''T''', '''T''': '''G''', '''H''': '''U''', '''U''': '''H''', '''I''': '''V''', '''V''': '''I''', '''J''': '''W''', '''W''': '''J''', '''K''': '''X''', '''X''': '''K''', '''L''': '''Y''', '''Y''': '''L''', '''M''': '''Z''', '''Z''': '''M''', } # -------------------------- extra rotors -------------------------- UpperCAmelCase_ : Optional[Any] = '''RMDJXFUWGISLHVTCQNKYPBEZOA''' UpperCAmelCase_ : List[str] = '''SGLCPQWZHKXAREONTFBVIYJUDM''' UpperCAmelCase_ : Optional[Any] = '''HVSICLTYKQUBXDWAJZOMFGPREN''' UpperCAmelCase_ : Union[str, Any] = '''RZWQHFMVDBKICJLNTUXAGYPSOE''' UpperCAmelCase_ : Tuple = '''LFKIJODBEGAMQPXVUHYSTCZRWN''' UpperCAmelCase_ : int = '''KOAEGVDHXPQZMLFTYWJNBRCIUS''' def SCREAMING_SNAKE_CASE_ ( __magic_name__ : RotorPositionT , __magic_name__ : RotorSelectionT , __magic_name__ : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """simple docstring""" if (unique_rotsel := len(set(__magic_name__ ) )) < 3: UpperCamelCase :str = f"""Please use 3 unique rotors (not {unique_rotsel})""" raise Exception(__magic_name__ ) # Checks if rotor positions are valid UpperCamelCase , UpperCamelCase , UpperCamelCase :str = rotpos if not 0 < rotorposa <= len(__magic_name__ ): UpperCamelCase :Union[str, Any] = f"""First rotor position is not within range of 1..26 ({rotorposa}""" raise ValueError(__magic_name__ ) if not 0 < rotorposa <= len(__magic_name__ ): UpperCamelCase :int = f"""Second rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(__magic_name__ ) if not 0 < rotorposa <= len(__magic_name__ ): UpperCamelCase :List[Any] = f"""Third rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(__magic_name__ ) # Validates string and returns dict UpperCamelCase :Any = _plugboard(__magic_name__ ) return rotpos, rotsel, pbdict def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> dict[str, str]: """simple docstring""" if not isinstance(__magic_name__ , __magic_name__ ): UpperCamelCase :Any = f"""Plugboard setting isn't type string ({type(__magic_name__ )})""" raise TypeError(__magic_name__ ) elif len(__magic_name__ ) % 2 != 0: UpperCamelCase :List[Any] = f"""Odd number of symbols ({len(__magic_name__ )})""" raise Exception(__magic_name__ ) elif pbstring == "": return {} pbstring.replace(""" """ , """""" ) # Checks if all characters are unique UpperCamelCase :Union[str, Any] = set() for i in pbstring: if i not in abc: UpperCamelCase :List[str] = f"""'{i}' not in list of symbols""" raise Exception(__magic_name__ ) elif i in tmppbl: UpperCamelCase :Tuple = f"""Duplicate symbol ({i})""" raise Exception(__magic_name__ ) else: tmppbl.add(__magic_name__ ) del tmppbl # Created the 
dictionary UpperCamelCase :Optional[int] = {} for j in range(0 , len(__magic_name__ ) - 1 , 2 ): UpperCamelCase :Optional[Any] = pbstring[j + 1] UpperCamelCase :str = pbstring[j] return pb def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : RotorPositionT , __magic_name__ : RotorSelectionT = (rotora, rotora, rotora) , __magic_name__ : str = "" , ) -> str: """simple docstring""" UpperCamelCase :Dict = text.upper() UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = _validator( __magic_name__ , __magic_name__ , plugb.upper() ) UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = rotor_position UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 UpperCamelCase :Dict = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: UpperCamelCase :Tuple = plugboard[symbol] # rotor ra -------------------------- UpperCamelCase :str = abc.index(__magic_name__ ) + rotorposa UpperCamelCase :Any = rotora[index % len(__magic_name__ )] # rotor rb -------------------------- UpperCamelCase :str = abc.index(__magic_name__ ) + rotorposa UpperCamelCase :str = rotora[index % len(__magic_name__ )] # rotor rc -------------------------- UpperCamelCase :int = abc.index(__magic_name__ ) + rotorposa UpperCamelCase :List[Any] = rotora[index % len(__magic_name__ )] # reflector -------------------------- # this is the reason you don't need another machine to decipher UpperCamelCase :Optional[Any] = reflector[symbol] # 2nd rotors UpperCamelCase :Union[str, Any] = abc[rotora.index(__magic_name__ ) - rotorposa] UpperCamelCase :List[Any] = abc[rotora.index(__magic_name__ ) - rotorposa] UpperCamelCase :Optional[int] = abc[rotora.index(__magic_name__ ) - rotorposa] # 2nd plugboard if symbol in plugboard: UpperCamelCase :str = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(__magic_name__ ): UpperCamelCase :Dict = 0 rotorposa += 1 if rotorposa >= len(__magic_name__ ): UpperCamelCase :str = 0 rotorposa += 1 if rotorposa >= len(__magic_name__ ): UpperCamelCase :Any = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(__magic_name__ ) return "".join(__magic_name__ ) if __name__ == "__main__": UpperCAmelCase_ : str = '''This is my Python script that emulates the Enigma machine from WWII.''' UpperCAmelCase_ : Optional[int] = (1, 1, 1) UpperCAmelCase_ : List[str] = '''pictures''' UpperCAmelCase_ : int = (rotora, rotora, rotora) UpperCAmelCase_ : Union[str, Any] = enigma(message, rotor_pos, rotor_sel, pb) print('''Encrypted message:''', en) print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
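# Illustrative sketch: one rotor pass in the machine above is just modular
# index arithmetic over the alphabet: add the rotor position going forward,
# subtract it coming back, so each pass is invertible. A single-rotor round
# trip using the same wiring as the first rotor:
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
rotor = "EGZWVONAHDCLFQMSIPJBYUKXTR"

def forward(sym: str, pos: int) -> str:
    return rotor[(abc.index(sym) + pos) % len(abc)]

def backward(sym: str, pos: int) -> str:
    return abc[(rotor.index(sym) - pos) % len(abc)]

for ch in "ENIGMA":
    assert backward(forward(ch, 4), 4) == ch  # decryption undoes encryption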
38
"""simple docstring""" def a_ ( lowerCamelCase ): return str(lowerCamelCase ) == str(lowerCamelCase )[::-1] def a_ ( lowerCamelCase ): return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] ) def a_ ( lowerCamelCase = 1_0_0_0_0 ): UpperCAmelCase__ = [] for num in range(1 , lowerCamelCase ): UpperCAmelCase__ = 0 UpperCAmelCase__ = num while iterations < 5_0: UpperCAmelCase__ = sum_reverse(lowerCamelCase ) iterations += 1 if is_palindrome(lowerCamelCase ): break else: lychrel_nums.append(lowerCamelCase ) return len(lowerCamelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
98
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
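# Illustrative sketch: `_LazyModule` above defers heavyweight imports until an
# attribute is first accessed. The same idea in plain Python via a
# module-level __getattr__ (PEP 562); `_LAZY` is a made-up mapping for
# demonstration:
import importlib

_LAZY = {"sqrt": "math", "dumps": "json"}  # attribute name -> providing module

def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
# Placed in a module, `mod.sqrt` triggers `import math` only on first access.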
358
from string import ascii_uppercase

letter_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_letter = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the keyword until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: subtract the key letter from each message letter (mod 26);
    spaces pass through unencrypted."""
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (letter_to_index[letter] - letter_to_index[key_new[i]]) % 26
            i += 1
            cipher += index_to_letter[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt: add the key letter back to each cipher letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (letter_to_index[letter] + letter_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_letter[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
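# The keyword expansion in `generate_key` above can be written as a one-liner
# with itertools; this equivalent form makes the "repeat the key until it
# covers the message" rule explicit:
from itertools import cycle, islice

def cycled_key(message: str, key: str) -> str:
    return "".join(islice(cycle(key), len(message)))

assert cycled_key("THE GERMAN ATTACK", "SECRET") == generate_key("THE GERMAN ATTACK", "SECRET")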
116
0
"""Convert a RemBERT TensorFlow checkpoint to a PyTorch model."""

import argparse

import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
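# Illustrative sketch: the conversion script above follows the standard
# three-step recipe (build a config, instantiate the PyTorch model from it,
# then serialize the state dict). The same skeleton with a toy module instead
# of RemBERT, saving to an in-memory buffer:
import io

import torch
from torch import nn

model = nn.Linear(4, 2)  # stand-in for RemBertModel(config)
buf = io.BytesIO()
torch.save(model.state_dict(), buf)
buf.seek(0)
assert set(torch.load(buf)) == {"weight", "bias"}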
34
"""Quickselect: find the k-th smallest element of a list in expected linear time."""

from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed) of a list of distinct ints."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
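# Usage sketch for `kth_number` above: on distinct values the random pivot
# never changes the answer, so repeated calls agree. (Duplicates would be
# silently dropped by the strict </> partition, so keep the inputs distinct.)
values = [7, 10, 4, 3, 20, 15]
assert all(kth_number(values, 3) == 7 for _ in range(10))  # 3rd smallest is 7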
271
0
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase__ : List[Any] = logging.get_logger(__name__) UpperCAmelCase__ : int = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCAmelCase__ : Tuple = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase__ : Any = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase__ : Tuple = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCAmelCase__ : Optional[int] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) UpperCAmelCase__ 
: int = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) UpperCAmelCase__ : Optional[int] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCAmelCase__ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCAmelCase__ : List[Any] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCAmelCase__ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCAmelCase__ : Any = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCAmelCase__ : List[str] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCAmelCase__ : str = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCAmelCase__ : List[str] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCAmelCase__ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) 
UpperCAmelCase__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase__ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase__ : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase__ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase__ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase__ : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase__ : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase__ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase__ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase__ : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase__ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase__ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Any = FLAX_MODEL_MAPPING UpperCAmelCase__ : Union[str, Any] = auto_class_update(FlaxAutoModel) class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : str = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase__ : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Dict = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase__ : List[str] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : str = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase__ : Tuple = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Any = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase__ : Union[str, Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase__ : Dict = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase__ : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : int = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase__ : Any = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase 
: Tuple = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase__ : Union[str, Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase__ : Any = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Any = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase__ : Dict = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase__ : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase : Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase__ : List[Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
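# A minimal usage sketch of the FlaxAuto* classes defined above, assuming
# `transformers` and `flax` are installed; the checkpoint name is illustrative.
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel via the mapping

inputs = tokenizer("Hello world", return_tensors="np")  # Flax models consume NumPy arrays
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)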
301
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # the success branch must not raise; RoCBert ships no fast tokenizer, so nothing extra to import

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
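# A hedged usage sketch of the lazily exported RoCBert classes; requires torch,
# and the checkpoint id (the one referenced in the transformers docs) is an
# assumption here.
from transformers import RoCBertModel, RoCBertTokenizer

tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
model = RoCBertModel.from_pretrained("weiweishi/roc-bert-base-zh")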
301
1
"""simple docstring""" from math import loga def a_ ( _lowerCAmelCase : int ): '''simple docstring''' if a < 0: raise ValueError('Input value must be a positive integer' ) elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise TypeError('Input value must be a \'int\' type' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
77
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase__: Union[str, Any] = "examples/" UpperCamelCase__: Optional[Any] = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } UpperCamelCase__: Optional[int] = { "init": "src/diffusers/__init__.py", "setup": "setup.py", } UpperCamelCase__: List[Any] = "README.md" def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ) -> Optional[int]: with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase : Optional[int] = f.read() UpperCAmelCase , UpperCAmelCase : List[Any] = REPLACE_PATTERNS[pattern] UpperCAmelCase : List[Any] = replace.replace('''VERSION''' , _lowerCAmelCase ) UpperCAmelCase : Optional[Any] = re_pattern.sub(_lowerCAmelCase , _lowerCAmelCase ) with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(_lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : Any ) -> Optional[int]: for folder, directories, fnames in os.walk(_lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , pattern='''examples''' ) def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str=False ) -> List[str]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if not patch: update_version_in_examples(_lowerCAmelCase ) def snake_case_ ( ) -> Optional[Any]: UpperCAmelCase : Optional[int] = '''🤗 Transformers currently provides the following architectures''' UpperCAmelCase : Optional[int] = '''1. Want to contribute a new model?''' with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase : Optional[Any] = f.readlines() # Find the start of the list. UpperCAmelCase : List[Any] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 UpperCAmelCase : Optional[Any] = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): UpperCAmelCase : Optional[int] = lines[index].replace( '''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , ) index += 1 with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(_lowerCAmelCase ) def snake_case_ ( ) -> Optional[Any]: with open(REPLACE_FILES['''init'''] , '''r''' ) as f: UpperCAmelCase : Union[str, Any] = f.read() UpperCAmelCase : int = REPLACE_PATTERNS['''init'''][0].search(_lowerCAmelCase ).groups()[0] return packaging.version.parse(_lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : List[str]=False ) -> Any: UpperCAmelCase : Optional[Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: UpperCAmelCase : Optional[int] = default_version.base_version elif patch: UpperCAmelCase : Union[str, Any] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: UpperCAmelCase : Union[str, Any] = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. UpperCAmelCase : Dict = input(f"""Which version are you releasing? [{default_version}]""" ) if len(_lowerCAmelCase ) == 0: UpperCAmelCase : Tuple = default_version print(f"""Updating version to {version}.""" ) global_version_update(_lowerCAmelCase , patch=_lowerCAmelCase ) def snake_case_ ( ) -> Any: UpperCAmelCase : List[Any] = get_version() UpperCAmelCase : List[str] = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" UpperCAmelCase : List[Any] = current_version.base_version # Check with the user we got that right. UpperCAmelCase : Optional[int] = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(_lowerCAmelCase ) == 0: UpperCAmelCase : Dict = dev_version print(f"""Updating version to {version}.""" ) global_version_update(_lowerCAmelCase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase__: Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") UpperCamelCase__: Optional[Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
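# A small demonstration of the packaging.version behavior the release helpers
# above rely on; the version strings are illustrative.
from packaging import version

v = version.parse("0.18.0.dev0")
print(v.is_devrelease)                    # True
print(v.base_version)                     # '0.18.0'
print(f"{v.major}.{v.minor + 1}.0.dev0")  # next dev version: '0.19.0.dev0'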
23
0
import logging
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments


__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)


@dataclass
class lowerCamelCase_ ( _A ):
    '''simple docstring'''

    a__ = field(default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    a__ = field(default=_A, metadata={"help": "Whether to SortishSamler or not."})
    a__ = field(default=_A, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    a__ = field(default=_A, metadata={"help": "whether to use adafactor"})
    a__ = field(default=_A, metadata={"help": "Encoder layer dropout probability. Goes into model.config."})
    a__ = field(default=_A, metadata={"help": "Decoder layer dropout probability. Goes into model.config."})
    a__ = field(default=_A, metadata={"help": "Dropout probability. Goes into model.config."})
    a__ = field(default=_A, metadata={"help": "Attention dropout probability. Goes into model.config."})
    a__ = field(
        default="linear",
        metadata={"help": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'''},
    )
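# Hedged sketch: dataclasses like the one above are normally consumed through
# HfArgumentParser, which turns every field into a CLI flag. Shown with the
# stock TrainingArguments, since the subclass's field names are obfuscated here.
from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser(TrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "out", "--learning_rate", "3e-5"]
)
print(training_args.learning_rate)  # 3e-05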
256
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
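# For reference, a self-contained sketch of a haversine distance like the
# helper imported above (spherical great-circle distance, in meters); the
# function name and the radius choice are assumptions for illustration.
from math import asin, sqrt


def haversine_sketch(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    radius = 6378137.0  # consistent with the equatorial radius used above
    d_lat, d_lon = radians(lat2 - lat1), radians(lon2 - lon1)
    h = sin(d_lat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(d_lon / 2) ** 2
    return 2 * radius * asin(sqrt(h))


print(haversine_sketch(37.774856, -122.424227, 37.864742, -119.537521))  # San Francisco -> Yosemite, roughly 254 km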
256
1
'''simple docstring'''


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i

            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True

            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
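# Usage example on the adjacency matrix of a small 5-vertex graph.
graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, 3))  # [0, 1, 0, 1, 0]; an empty list would mean 3 colors do not suffice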
35
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
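# Hedged sketch of loading a converted checkpoint with the public API; the
# dump path is a placeholder and the audio is random noise, so the decoded
# text is meaningless.
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("path/to/pytorch_dump_folder")
model = Wav2Vec2ForCTC.from_pretrained("path/to/pytorch_dump_folder")

inputs = processor(torch.randn(16_000).numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
print(processor.batch_decode(logits.argmax(dim=-1)))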
347
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
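# Hedged usage sketch: constructing Nezha from a small config avoids needing a
# checkpoint; the hyperparameters are illustrative. Requires torch.
from transformers import NezhaConfig, NezhaModel

config = NezhaConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
model = NezhaModel(config)  # randomly initialized
print(sum(p.numel() for p in model.parameters()))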
110
import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCAmelCase : Union[str, Any] = 16 _UpperCAmelCase : Dict = 32 def A ( lowercase , lowercase = 16 ) -> str: '''simple docstring''' UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase = datasets.map( lowercase , batched=lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase = 16 elif accelerator.mixed_precision != "no": UpperCamelCase = 8 else: UpperCamelCase = None return tokenizer.pad( lowercase , padding='longest' , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors='pt' , ) # Instantiate dataloaders. 
UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase , drop_last=lowercase ) UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase , drop_last=(accelerator.mixed_precision == 'fp8') , ) return train_dataloader, eval_dataloader def A ( lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase = config['lr'] UpperCamelCase = int(config['num_epochs'] ) UpperCamelCase = int(config['seed'] ) UpperCamelCase = int(config['batch_size'] ) UpperCamelCase = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation UpperCamelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE UpperCamelCase = MAX_GPU_BATCH_SIZE set_seed(lowercase ) UpperCamelCase , UpperCamelCase = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase = model.to(accelerator.device ) # Instantiate optimizer UpperCamelCase = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler UpperCamelCase = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCamelCase = model(**lowercase ) UpperCamelCase = outputs.loss UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase = model(**lowercase ) UpperCamelCase = outputs.logits.argmax(dim=-1 ) UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowercase , references=lowercase , ) UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f'''epoch {epoch}:''' , lowercase ) def A ( ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=lowercase , default=lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) UpperCamelCase = parser.parse_args() UpperCamelCase = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
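# A minimal Accelerator round-trip condensing the train-loop pattern used
# above; the model and data are toy placeholders.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = torch.utils.data.DataLoader(torch.randn(32, 4), batch_size=8)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for batch in dataloader:
    optimizer.zero_grad()
    loss = model(batch).pow(2).mean()
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()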
110
1
'''simple docstring'''


def matching_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
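# Usage example with the graph from the comment above. set.pop() order is
# arbitrary, so the returned cover (a 2-approximation, not necessarily a
# minimum one) can vary between runs.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(matching_min_vertex_cover(graph))  # e.g. {0, 1, 2, 4}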
93
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
    @slow
    def _snake_case ( self ):
        """simple docstring"""
        model = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )

        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids

        # mean cross-entropy rescaled to a summed sequence log-likelihood
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
93
1
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> str: __UpperCAmelCase : str = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") __UpperCAmelCase : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(UpperCAmelCase__ ): os.makedirs(UpperCAmelCase__ ) __UpperCAmelCase : int = model.state_dict() def to_tf_var_name(snake_case__ ): for patt, repl in iter(UpperCAmelCase__ ): __UpperCAmelCase : List[Any] = name.replace(UpperCAmelCase__, UpperCAmelCase__ ) return f'''bert/{name}''' def create_tf_var(snake_case__, snake_case__, snake_case__ ): __UpperCAmelCase : str = tf.dtypes.as_dtype(tensor.dtype ) __UpperCAmelCase : Optional[int] = tf.get_variable(dtype=UpperCAmelCase__, shape=tensor.shape, name=UpperCAmelCase__, initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(UpperCAmelCase__ ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: __UpperCAmelCase : Dict = to_tf_var_name(UpperCAmelCase__ ) __UpperCAmelCase : Tuple = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): __UpperCAmelCase : str = torch_tensor.T __UpperCAmelCase : Dict = create_tf_var(tensor=UpperCAmelCase__, name=UpperCAmelCase__, session=UpperCAmelCase__ ) tf.keras.backend.set_value(UpperCAmelCase__, UpperCAmelCase__ ) __UpperCAmelCase : int = session.run(UpperCAmelCase__ ) print(f'''Successfully created {tf_name}: {np.allclose(UpperCAmelCase__, UpperCAmelCase__ )}''' ) __UpperCAmelCase : Optional[Any] = tf.train.Saver(tf.trainable_variables() ) saver.save(UpperCAmelCase__, os.path.join(UpperCAmelCase__, model_name.replace("-", "_" ) + ".ckpt" ) ) def _UpperCamelCase ( snake_case__=None ) -> Union[str, Any]: __UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument("--model_name", type=UpperCAmelCase__, required=UpperCAmelCase__, help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir", type=UpperCAmelCase__, default=UpperCAmelCase__, required=UpperCAmelCase__, help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path", type=UpperCAmelCase__, required=UpperCAmelCase__, help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir", type=UpperCAmelCase__, required=UpperCAmelCase__, help="Directory in which to save tensorflow model" ) __UpperCAmelCase : int = parser.parse_args(UpperCAmelCase__ ) __UpperCAmelCase : Optional[int] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, ) convert_pytorch_checkpoint_to_tf(model=UpperCAmelCase__, ckpt_dir=args.tf_cache_dir, model_name=args.model_name ) if __name__ == "__main__": main()
363
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
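# Typical usage of the adapter above, via accelerate's public get_logger; an
# Accelerator (or PartialState) must exist first, as the RuntimeError enforces.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed on every process", main_process_only=False)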
342
0
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __snake_case ( A__ ): _a = (DDPMScheduler,) def UpperCAmelCase__ ( self : Union[str, Any] , **A_ : Any): lowerCAmelCase_ : Tuple = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_) return config def UpperCAmelCase__ ( self : Union[str, Any]): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_) def UpperCAmelCase__ ( self : List[str]): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_) def UpperCAmelCase__ ( self : Dict): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_) def UpperCAmelCase__ ( self : Dict): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_) def UpperCAmelCase__ ( self : Any): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_) def UpperCAmelCase__ ( self : Tuple): self.check_over_configs(thresholding=UpperCamelCase_) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def UpperCAmelCase__ ( self : int): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_) def UpperCAmelCase__ ( self : Union[str, Any]): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_) def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : int = self.scheduler_classes[0] lowerCAmelCase_ : Tuple = self.get_scheduler_config() lowerCAmelCase_ : Union[str, Any] = scheduler_class(**UpperCamelCase_) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0_0979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.02)) < 1e-5 def UpperCAmelCase__ ( self : str): lowerCAmelCase_ : List[Any] = self.scheduler_classes[0] lowerCAmelCase_ : Any = self.get_scheduler_config() lowerCAmelCase_ : int = scheduler_class(**UpperCamelCase_) lowerCAmelCase_ : Dict = len(UpperCamelCase_) lowerCAmelCase_ : Tuple = self.dummy_model() lowerCAmelCase_ : int = self.dummy_sample_deter lowerCAmelCase_ : Optional[Any] = torch.manual_seed(0) for t in reversed(range(UpperCamelCase_)): # 1. predict noise residual lowerCAmelCase_ : Dict = model(UpperCamelCase_ , UpperCamelCase_) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase_ : Any = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase_ : Optional[int] = pred_prev_sample lowerCAmelCase_ : Dict = torch.sum(torch.abs(UpperCamelCase_)) lowerCAmelCase_ : Optional[Any] = torch.mean(torch.abs(UpperCamelCase_)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def UpperCAmelCase__ ( self : Any): lowerCAmelCase_ : Dict = self.scheduler_classes[0] lowerCAmelCase_ : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''') lowerCAmelCase_ : List[Any] = scheduler_class(**UpperCamelCase_) lowerCAmelCase_ : Optional[Any] = len(UpperCamelCase_) lowerCAmelCase_ : List[str] = self.dummy_model() lowerCAmelCase_ : Optional[int] = self.dummy_sample_deter lowerCAmelCase_ : Tuple = torch.manual_seed(0) for t in reversed(range(UpperCamelCase_)): # 1. predict noise residual lowerCAmelCase_ : Tuple = model(UpperCamelCase_ , UpperCamelCase_) # 2. predict previous mean of sample x_t-1 lowerCAmelCase_ : Optional[int] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase_ : List[str] = pred_prev_sample lowerCAmelCase_ : List[str] = torch.sum(torch.abs(UpperCamelCase_)) lowerCAmelCase_ : Tuple = torch.mean(torch.abs(UpperCamelCase_)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def UpperCAmelCase__ ( self : Any): lowerCAmelCase_ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase_ : Optional[Any] = self.get_scheduler_config() lowerCAmelCase_ : List[Any] = scheduler_class(**UpperCamelCase_) lowerCAmelCase_ : List[str] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_) lowerCAmelCase_ : Tuple = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_): if i == len(UpperCamelCase_) - 1: lowerCAmelCase_ : List[str] = -1 else: lowerCAmelCase_ : List[str] = timesteps[i + 1] lowerCAmelCase_ : Optional[int] = scheduler.previous_timestep(UpperCamelCase_) lowerCAmelCase_ : Optional[Any] = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_) def UpperCAmelCase__ ( self : Dict): lowerCAmelCase_ : Tuple = self.scheduler_classes[0] lowerCAmelCase_ : List[Any] = self.get_scheduler_config() lowerCAmelCase_ : Union[str, Any] = scheduler_class(**UpperCamelCase_) lowerCAmelCase_ : List[str] = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.'''): scheduler.set_timesteps(timesteps=UpperCamelCase_) def UpperCAmelCase__ ( self : str): lowerCAmelCase_ : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase_ : Union[str, Any] = self.get_scheduler_config() lowerCAmelCase_ : Union[str, Any] = scheduler_class(**UpperCamelCase_) lowerCAmelCase_ : Tuple = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase_ : List[Any] = len(UpperCamelCase_) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_) def UpperCAmelCase__ ( self : int): lowerCAmelCase_ : str = 
self.scheduler_classes[0] lowerCAmelCase_ : Any = self.get_scheduler_config() lowerCAmelCase_ : Dict = scheduler_class(**UpperCamelCase_) lowerCAmelCase_ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_)
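# A compact DDPM sampling loop exercising the scheduler API covered by the
# tests above; the "model" is a stand-in that returns random noise.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for a UNet's noise prediction
    sample = scheduler.step(model_output, t, sample).prev_sample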
103
'''simple docstring'''

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
        )
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
        )
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('''input_proj.weight''', '''input_projection.weight'''),
        ('''input_proj.bias''', '''input_projection.bias'''),
        ('''query_embed.weight''', '''query_position_embeddings.weight'''),
        ('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
        ('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
        ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
        ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
        ('''class_embed.weight''', '''class_labels_classifier.weight'''),
        ('''class_embed.bias''', '''class_labels_classifier.bias'''),
        ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
        ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
        ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
        ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
        ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
        ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
    ]
)


def a ( __a , __a , __a ) -> List[str]:
    '''simple docstring'''
    UpperCamelCase__ :List[Any] = state_dict.pop(__a )
    UpperCamelCase__ :int = val


def a ( __a ) -> Any:
    '''simple docstring'''
    UpperCamelCase__ :Tuple = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            UpperCamelCase__ :List[str] = value
        else:
            UpperCamelCase__ :Dict = value
    return new_state_dict


def a ( __a ) -> Optional[Any]:
    '''simple docstring'''
    UpperCamelCase__ :Optional[Any] = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        UpperCamelCase__ :Any = in_proj_weight[:256, :]
        UpperCamelCase__ :Tuple = in_proj_bias[:256]
        UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
        UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
        UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
        UpperCamelCase__ :Dict = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        UpperCamelCase__ :Any = in_proj_weight[:256, :]
        UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
        UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
        UpperCamelCase__ :Dict = in_proj_bias[256:512]
        UpperCamelCase__ :Any = in_proj_weight[-256:, :]
        UpperCamelCase__ :Dict = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        UpperCamelCase__ :List[str] = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
        UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
        UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
        UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
        UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
        UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]


def a ( __a , __a ) -> Optional[int]:
    '''simple docstring'''
    UpperCamelCase__ , UpperCamelCase__ :str = image.size
    UpperCamelCase__ :Optional[Any] = max(__a , __a )
    UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
    UpperCamelCase__ :Dict = target_max_size / current_max_size
    UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image


def a ( __a ) -> int:
    '''simple docstring'''
    UpperCamelCase__ :Any = F.to_tensor(__a )
    UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
    return image


@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
    '''simple docstring'''
    logger.info('''Converting model...''' )
    # load original state dict
    UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(__a , __a , __a )
    UpperCamelCase__ :Any = rename_backbone_keys(__a )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__a )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCamelCase__ :Dict = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
            UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
            UpperCamelCase__ :int = val
    # create HuggingFace model and load state dict
    UpperCamelCase__ :str = TableTransformerConfig(
        backbone='''resnet18''' ,
        mask_loss_coefficient=1 ,
        dice_loss_coefficient=1 ,
        ce_loss_coefficient=1 ,
        bbox_loss_coefficient=5 ,
        giou_loss_coefficient=2 ,
        eos_coefficient=0.4 ,
        class_cost=1 ,
        bbox_cost=5 ,
        giou_cost=2 ,
    )
    if "detection" in checkpoint_url:
        UpperCamelCase__ :List[str] = 15
        UpperCamelCase__ :int = 2
        UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
        UpperCamelCase__ :int = idalabel
        UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
    else:
        UpperCamelCase__ :int = 125
        UpperCamelCase__ :List[str] = 6
        UpperCamelCase__ :Optional[Any] = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        UpperCamelCase__ :Dict = idalabel
        UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
    UpperCamelCase__ :List[Any] = DetrImageProcessor(
        format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
    UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
    model.load_state_dict(__a )
    model.eval()
    # verify our conversion
    UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
    UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
    UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
    UpperCamelCase__ :Optional[int] = model(__a )
    if "detection" in checkpoint_url:
        UpperCamelCase__ :Dict = (1, 15, 3)
        UpperCamelCase__ :List[Any] = torch.tensor(
            [[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
        UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
    else:
        UpperCamelCase__ :Optional[Any] = (1, 125, 7)
        UpperCamelCase__ :Dict = torch.tensor(
            [[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
        UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(__a ).mkdir(exist_ok=__a )
        model.save_pretrained(__a )
        image_processor.save_pretrained(__a )
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''' )
        UpperCamelCase__ :Union[str, Any] = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(__a )
        image_processor.push_to_hub(__a )


if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
        type=str,
        choices=[
            '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
            '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
        ],
        help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    __snake_case = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
97
0
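A minimal sketch, not part of the dataset rows themselves: the conversion script above repeatedly slices PyTorch's fused MultiheadAttention `in_proj_weight`/`in_proj_bias` into separate query, key and value projections. The toy size and variable names below are assumptions chosen to mirror the script's 256-wide slices.

# Hypothetical standalone example of the q/k/v slicing convention used in read_in_q_k_v
import torch

hidden_size = 256  # assumption: matches the slice width in the script above
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] projection
in_proj_bias = torch.randn(3 * hidden_size)

# query, key and value, in that order, exactly as the script slices them
q_w, q_b = in_proj_weight[:hidden_size, :], in_proj_bias[:hidden_size]
k_w, k_b = in_proj_weight[hidden_size : 2 * hidden_size, :], in_proj_bias[hidden_size : 2 * hidden_size]
v_w, v_b = in_proj_weight[-hidden_size:, :], in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)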
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class A_ ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self: Dict ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : List[Any] = ort.SessionOptions() _lowerCamelCase : List[Any] = False return options def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) _lowerCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) _lowerCamelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default _lowerCamelCase : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" ,revision="onnx" ,safety_checker=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Dict = "A red cat sitting on a park bench" _lowerCamelCase : List[str] = np.random.RandomState(0 ) _lowerCamelCase : str = pipe( prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,mask_image=__lowerCAmelCase ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=15 ,generator=__lowerCAmelCase ,output_type="np" ,) _lowerCamelCase : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
370
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
0
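A de-obfuscated sketch of the algorithm in the min-cut row above: BFS-based augmenting paths (the Edmonds-Karp variant of Ford-Fulkerson) push flow until no path from source to sink remains. The function and variable names below are illustrative assumptions, not from the row; the test matrix is the same one the row uses.

# Hypothetical runnable example of BFS augmenting-path max flow
from collections import deque

def max_flow(capacity, source, sink):
    n = len(capacity)
    graph = [row[:] for row in capacity]  # residual capacities
    flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: flow is maximal
            return flow
        # find the bottleneck along the path, then push that much flow
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= bottleneck
            graph[v][u] += bottleneck
            v = u
        flow += bottleneck

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(max_flow(test_graph, 0, 5))  # 23 for this classic CLRS-style network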
import pprint

import requests

snake_case_ = 'https://zenquotes.io/api'


def lowerCamelCase__ ( ) -> list:
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()


def lowerCamelCase__ ( ) -> list:
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()


if __name__ == "__main__":
    snake_case_ = random_quotes()
    pprint.pprint(response)
24
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class __snake_case ( __lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = None
    lowerCAmelCase__ = None
    lowerCAmelCase__ = None
    lowerCAmelCase__ = None


class __snake_case ( __lowerCamelCase ):
    '''simple docstring'''

    def __init__( self : Any , A : List[str]=1 , A : str=0 , A : List[Any]=2 , A : Union[str, Any]=512 , A : Tuple="cls" , A : Union[str, Any]=False , A : Optional[Any]=True , **A : Optional[int] , ):
        super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
        __snake_case: str = project_dim
        __snake_case: Optional[int] = pooler_fn
        __snake_case: Dict = learn_encoder
        __snake_case: str = use_attention_mask


class __snake_case ( __lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = [R"""pooler""", R"""logit_scale"""]
    lowerCAmelCase__ = [R"""position_ids""", R"""predictions.decoder.bias"""]
    lowerCAmelCase__ = """roberta"""
    lowerCAmelCase__ = RobertaSeriesConfig

    def __init__( self : Dict , A : Dict ):
        super().__init__(A )
        __snake_case: Optional[Any] = XLMRobertaModel(A )
        __snake_case: List[Any] = nn.Linear(config.hidden_size , config.project_dim )
        __snake_case: Optional[int] = getattr(A , """has_pre_transformation""" , A )
        if self.has_pre_transformation:
            __snake_case: Optional[Any] = nn.Linear(config.hidden_size , config.project_dim )
            __snake_case: Optional[Any] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , A : Optional[bool] = None , ):
        __snake_case: Any = return_dict if return_dict is not None else self.config.use_return_dict
        __snake_case: Optional[int] = self.base_model(
            input_ids=A ,
            attention_mask=A ,
            token_type_ids=A ,
            position_ids=A ,
            head_mask=A ,
            inputs_embeds=A ,
            encoder_hidden_states=A ,
            encoder_attention_mask=A ,
            output_attentions=A ,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states ,
            return_dict=A ,
        )
        if self.has_pre_transformation:
            __snake_case: int = outputs["""hidden_states"""][-2]
            __snake_case: List[str] = self.pre_LN(A )
            __snake_case: List[str] = self.transformation_pre(A )
            return TransformationModelOutput(
                projection_state=A ,
                last_hidden_state=outputs.last_hidden_state ,
                hidden_states=outputs.hidden_states ,
                attentions=outputs.attentions ,
            )
        else:
            __snake_case: Optional[int] = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=A ,
                last_hidden_state=outputs.last_hidden_state ,
                hidden_states=outputs.hidden_states ,
                attentions=outputs.attentions ,
            )
111
0
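A small sketch of the projection step in the Roberta-series row above: the model maps the encoder's `last_hidden_state` through `nn.Linear(hidden_size, project_dim)` to produce a `projection_state`. The sizes below are assumptions (the row's config appears to default `project_dim` to 512); nothing here is taken from the dataset row itself.

# Hypothetical example of projecting encoder hidden states to a smaller dimension
import torch
from torch import nn

hidden_size, project_dim = 768, 512  # assumed sizes
projection = nn.Linear(hidden_size, project_dim)

last_hidden_state = torch.randn(2, 16, hidden_size)  # (batch, seq_len, hidden)
projection_state = projection(last_hidden_state)
assert projection_state.shape == (2, 16, project_dim)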
'''simple docstring'''

from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)


@dataclass
class _snake_case ( lowercase_ ):
    lowerCAmelCase_ : Optional[Any] = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **a__ ) -> Union[str, Any]:
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                snake_case_ = deprecated_arg[3:]
                setattr(self , a__ , not kwargs.pop(a__ ) )
                logger.warning(
                    F'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
                    F' {positive_arg}={kwargs[positive_arg]}'
                )
        snake_case_ = kwargs.pop("torchscript" , self.torchscript )
        snake_case_ = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
        snake_case_ = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
        super().__init__(**a__ )

    lowerCAmelCase_ : bool = field(default=lowercase_ , metadata={"help": "Trace the models using torchscript"} )
    lowerCAmelCase_ : bool = field(default=lowercase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    lowerCAmelCase_ : str = field(
        default="O1" ,
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } ,
    )

    @cached_property
    def lowerCAmelCase__ ( self ) -> Tuple["torch.device", int]:
        '''simple docstring'''
        requires_backends(self , ["torch"] )
        logger.info("PyTorch: setting up devices" )
        if not self.cuda:
            snake_case_ = torch.device("cpu" )
            snake_case_ = 0
        elif is_torch_tpu_available():
            snake_case_ = xm.xla_device()
            snake_case_ = 0
        else:
            snake_case_ = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
            snake_case_ = torch.cuda.device_count()
        return device, n_gpu

    @property
    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        return is_torch_tpu_available() and self.tpu

    @property
    def lowerCAmelCase__ ( self ) -> int:
        '''simple docstring'''
        requires_backends(self , ["torch"] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def lowerCAmelCase__ ( self ) -> "torch.device":
        '''simple docstring'''
        requires_backends(self , ["torch"] )
        return self._setup_devices[0]

    @property
    def lowerCAmelCase__ ( self ) -> List[str]:
        '''simple docstring'''
        requires_backends(self , ["torch"] )
        return self._setup_devices[1]

    @property
    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        return self.n_gpu > 0
92
'''simple docstring'''

import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)

_SCREAMING_SNAKE_CASE : List[str] = {"facebook/bart-base": BartForConditionalGeneration}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"facebook/bart-base": BartTokenizer}


def UpperCamelCase_( ):
    '''simple docstring'''
    snake_case_ = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
    parser.add_argument(
        "--validation_file" , type=snake_case , default=snake_case , help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length" ,
        type=snake_case ,
        default=5 ,
        help="The maximum total input sequence length after tokenization." ,
    )
    parser.add_argument(
        "--num_beams" ,
        type=snake_case ,
        default=snake_case ,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ) ,
    )
    parser.add_argument(
        "--model_name_or_path" ,
        type=snake_case ,
        help="Path to pretrained model or model identifier from huggingface.co/models." ,
        required=snake_case ,
    )
    parser.add_argument(
        "--config_name" ,
        type=snake_case ,
        default=snake_case ,
        help="Pretrained config name or path if not the same as model_name" ,
    )
    parser.add_argument(
        "--device" ,
        type=snake_case ,
        default="cpu" ,
        help="Device where the model will be run" ,
    )
    parser.add_argument("--output_file_path" , type=snake_case , default=snake_case , help="Where to store the final ONNX file." )
    snake_case_ = parser.parse_args()
    return args


def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : List[str]="cpu" ):
    '''simple docstring'''
    snake_case_ = model_dict[model_name].from_pretrained(snake_case ).to(snake_case )
    snake_case_ = tokenizer_dict[model_name].from_pretrained(snake_case )
    if model_name in ["facebook/bart-base"]:
        snake_case_ = 0
        snake_case_ = None
        snake_case_ = 0
    return huggingface_model, tokenizer


def UpperCamelCase_( snake_case : int , snake_case : Tuple , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[Any] ):
    '''simple docstring'''
    model.eval()
    snake_case_ = None
    snake_case_ = torch.jit.script(BARTBeamSearchGenerator(snake_case ) )
    with torch.no_grad():
        snake_case_ = "My friends are cool but they eat too many carbs."
        snake_case_ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="pt" ).to(model.device )
        snake_case_ = model.generate(
            inputs["input_ids"] ,
            attention_mask=inputs["attention_mask"] ,
            num_beams=snake_case ,
            max_length=snake_case ,
            early_stopping=snake_case ,
            decoder_start_token_id=model.config.decoder_start_token_id ,
        )
        torch.onnx.export(
            snake_case ,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) ,
            snake_case ,
            opset_version=1_4 ,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] ,
            output_names=["output_ids"] ,
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            } ,
            example_outputs=snake_case ,
        )
        logger.info("Model exported to {}".format(snake_case ) )
        snake_case_ = remove_dup_initializers(os.path.abspath(snake_case ) )
        logger.info("Deduplicated and optimized model written to {}".format(snake_case ) )
        snake_case_ = onnxruntime.InferenceSession(snake_case )
        snake_case_ = ort_sess.run(
            snake_case ,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(snake_case ),
                "max_length": np.array(snake_case ),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
            } ,
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
        logger.info("Model outputs from torch and ONNX Runtime are similar." )
        logger.info("Success." )


def UpperCamelCase_( ):
    '''simple docstring'''
    snake_case_ = parse_args()
    snake_case_ = 5
    snake_case_ = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,
        datefmt="%m/%d/%Y %H:%M:%S" ,
        level=logging.INFO ,
    )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    snake_case_ = torch.device(args.device )
    snake_case_ , snake_case_ = load_model_tokenizer(args.model_name_or_path , snake_case )
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
    model.to(snake_case )
    if args.max_length:
        snake_case_ = args.max_length
    if args.num_beams:
        snake_case_ = args.num_beams
    if args.output_file_path:
        snake_case_ = args.output_file_path
    else:
        snake_case_ = "BART.onnx"
    logger.info("Exporting model to ONNX" )
    export_and_validate_model(snake_case , snake_case , snake_case , snake_case , snake_case )


if __name__ == "__main__":
    main()
92
1
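A minimal sketch of the export-and-validate pattern in the BART row above: export a module with torch.onnx.export, run the same input through ONNX Runtime, and compare the outputs with np.testing.assert_allclose. The tiny Linear module, file name, and tensor names below are illustrative assumptions, not the row's BART-specific setup.

# Hypothetical end-to-end example: torch -> ONNX -> onnxruntime cross-check
import numpy as np
import onnxruntime
import torch

model = torch.nn.Linear(4, 2)
model.eval()
example_input = torch.randn(1, 4)

torch.onnx.export(
    model,
    (example_input,),
    "tiny.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
)

# run the same input through ONNX Runtime and compare against PyTorch
ort_sess = onnxruntime.InferenceSession("tiny.onnx")
(ort_out,) = ort_sess.run(None, {"x": example_input.numpy()})
with torch.no_grad():
    torch_out = model(example_input).numpy()
np.testing.assert_allclose(torch_out, ort_out, rtol=1e-3, atol=1e-3)
print("PyTorch and ONNX Runtime outputs match.")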