code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import random from typing import Any def a__ ( lowerCAmelCase__ ) -> list[Any]: for _ in range(len(lowerCAmelCase__ ) ): UpperCAmelCase__ : int = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ : Optional[int] = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = data[b], data[a] return data if __name__ == "__main__": UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7] UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
75
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'torchsde'] def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] )
75
1
'''simple docstring''' import math def a__ ( ) -> None: UpperCAmelCase__ : List[str] = input('''Enter message: ''' ) UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) ) UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ ) elif mode.lower().startswith('''d''' ): UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(F"""Output:\n{text + "|"}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = [''''''] * key for col in range(lowerCAmelCase__ ): UpperCAmelCase__ : Tuple = col while pointer < len(lowerCAmelCase__ ): cipher_text[col] += message[pointer] pointer += key return "".join(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key ) UpperCAmelCase__ : Any = key UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = [''''''] * num_cols UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : List[Any] = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase__ : Optional[int] = 0 row += 1 return "".join(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
75
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'ctrl' lowerCAmelCase__ = ['past_key_values'] lowerCAmelCase__ = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Any = n_positions UpperCAmelCase__ : Optional[Any] = n_embd UpperCAmelCase__ : List[str] = n_layer UpperCAmelCase__ : Any = n_head UpperCAmelCase__ : int = dff UpperCAmelCase__ : str = resid_pdrop UpperCAmelCase__ : Tuple = embd_pdrop UpperCAmelCase__ : int = layer_norm_epsilon UpperCAmelCase__ : Tuple = initializer_range UpperCAmelCase__ : Union[str, Any] = use_cache super().__init__(**_A )
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[str] ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def lowercase_ ( self : List[Any] , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script 
, source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , ) def lowercase_ ( self : Optional[int] , _A : Any ): '''simple docstring''' TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def lowercase_ ( self : Optional[int] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A ) # run training estimator.fit() # result dataframe UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase__ : Any = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''ConditionalDetrFeatureExtractor'''] UpperCamelCase__ = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets UpperCamelCase__ = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' UpperCamelCase__ = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' UpperCamelCase__ = R''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , ) def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 0.0 for i, j in zip(_A , _A ): n_correct += 1.0 if 
math_equivalence.is_equiv(_A , _A ) else 0.0 UpperCAmelCase__ : Dict = n_correct / len(_A ) return { "accuracy": accuracy, }
75
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': 
'''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } UpperCamelCase__ = { '''facebook/bart-base''': 1_0_2_4, '''facebook/bart-large''': 1_0_2_4, '''facebook/bart-large-mnli''': 1_0_2_4, '''facebook/bart-large-cnn''': 1_0_2_4, '''facebook/bart-large-xsum''': 1_0_2_4, '''yjernite/bart_eli5''': 1_0_2_4, } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = BartTokenizer def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ): '''simple docstring''' super().__init__( _A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , ) UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) ) UpperCAmelCase__ 
: Any = add_prefix_space UpperCAmelCase__ : str = pre_tok_class(**_A ) UpperCAmelCase__ : Dict = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase__ : Optional[Any] = '''post_processor''' UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A ) if tokenizer_component_instance: UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] ) if "cls" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] ) UpperCAmelCase__ : Dict = False if state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : Union[str, Any] = add_prefix_space UpperCAmelCase__ : Dict = True if state.get('''trim_offsets''' , _A ) != trim_offsets: UpperCAmelCase__ : List[Any] = trim_offsets UpperCAmelCase__ : List[Any] = True if changes_to_apply: UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) ) UpperCAmelCase__ : Union[str, Any] = component_class(**_A ) setattr(self.backend_tokenizer , _A , _A ) @property def lowercase_ ( self : Dict ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value UpperCAmelCase__ : str = value def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) 
return super()._batch_encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A ) return tuple(_A ) def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ): '''simple docstring''' UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] UpperCAmelCase__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
75
1
'''simple docstring''' from itertools import product def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> list[int]: UpperCAmelCase__ : Optional[Any] = sides_number UpperCAmelCase__ : Optional[Any] = max_face_number * dice_number UpperCAmelCase__ : Optional[int] = [0] * (max_total + 1) UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : List[str] = range(lowerCAmelCase__ , max_face_number + 1 ) for dice_numbers in product(lowerCAmelCase__ , repeat=lowerCAmelCase__ ): UpperCAmelCase__ : Optional[Any] = sum(lowerCAmelCase__ ) totals_frequencies[total] += 1 return totals_frequencies def a__ ( ) -> float: UpperCAmelCase__ : int = total_frequency_distribution( sides_number=4 , dice_number=9 ) UpperCAmelCase__ : Optional[int] = total_frequency_distribution( sides_number=6 , dice_number=6 ) UpperCAmelCase__ : Union[str, Any] = 0 UpperCAmelCase__ : str = 9 UpperCAmelCase__ : str = 4 * 9 UpperCAmelCase__ : Tuple = 6 for peter_total in range(lowerCAmelCase__ , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) UpperCAmelCase__ : str = (4**9) * (6**6) UpperCAmelCase__ : Union[str, Any] = peter_wins_count / total_games_number UpperCAmelCase__ : Any = round(lowerCAmelCase__ , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
75
'''simple docstring''' import random from typing import Any def a__ ( lowerCAmelCase__ ) -> list[Any]: for _ in range(len(lowerCAmelCase__ ) ): UpperCAmelCase__ : int = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ : Optional[int] = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = data[b], data[a] return data if __name__ == "__main__": UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7] UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import math def a__ ( lowerCAmelCase__ ) -> list[int]: UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Optional[Any] = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment UpperCAmelCase__ : str = [True] * (end + 1) UpperCAmelCase__ : Any = [] while start <= end: if temp[start] is True: in_prime.append(lowerCAmelCase__ ) for i in range(start * start , end + 1 , lowerCAmelCase__ ): UpperCAmelCase__ : Dict = False start += 1 prime += in_prime UpperCAmelCase__ : Optional[int] = end + 1 UpperCAmelCase__ : str = min(2 * end , lowerCAmelCase__ ) while low <= n: UpperCAmelCase__ : List[str] = [True] * (high - low + 1) for each in in_prime: UpperCAmelCase__ : List[str] = math.floor(low / each ) * each if t < low: t += each for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ): UpperCAmelCase__ : Union[str, Any] = False for j in range(len(lowerCAmelCase__ ) ): if temp[j] is True: prime.append(j + low ) UpperCAmelCase__ : Union[str, Any] = high + 1 UpperCAmelCase__ : str = min(high + end , lowerCAmelCase__ ) return prime print(sieve(1_0**6))
75
1
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time UpperCamelCase__ = Lock() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(lowerCAmelCase__ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() UpperCAmelCase__ : int = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left UpperCAmelCase__ : Union[str, Any] = min(lowerCAmelCase__ , lowerCAmelCase__ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(lowerCAmelCase__ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() UpperCAmelCase__ : str = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right UpperCAmelCase__ : List[Any] = max(lowerCAmelCase__ , lowerCAmelCase__ ) # after all swaps are performed, send the values back to main result_pipe[1].send(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: UpperCAmelCase__ : str = [] UpperCAmelCase__ : str = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop UpperCAmelCase__ : Tuple = Pipe() UpperCAmelCase__ : Dict = Pipe() process_array_.append( Process( target=lowerCAmelCase__ , args=(0, arr[0], 
None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) UpperCAmelCase__ : Optional[Any] = temp_rs UpperCAmelCase__ : int = temp_rr for i in range(1 , len(lowerCAmelCase__ ) - 1 ): UpperCAmelCase__ : Tuple = Pipe() UpperCAmelCase__ : Any = Pipe() process_array_.append( Process( target=lowerCAmelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) UpperCAmelCase__ : str = temp_rs UpperCAmelCase__ : List[Any] = temp_rr process_array_.append( Process( target=lowerCAmelCase__ , args=( len(lowerCAmelCase__ ) - 1, arr[len(lowerCAmelCase__ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(lowerCAmelCase__ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(lowerCAmelCase__ ) ): UpperCAmelCase__ : Optional[Any] = result_pipe[p][0].recv() process_array_[p].join() return arr def a__ ( ) -> Any: UpperCAmelCase__ : int = list(range(10 , 0 , -1 ) ) print('''Initial List''' ) print(*lowerCAmelCase__ ) UpperCAmelCase__ : str = odd_even_transposition(lowerCAmelCase__ ) print('''Sorted List\n''' ) print(*lowerCAmelCase__ ) if __name__ == "__main__": main()
75
"""Fast and slow tests for the Stable Diffusion 2 inpainting pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

# BUGFIX: `UNetaDConditionModel` does not exist in diffusers; the real class is UNet2DConditionModel.
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


# BUGFIX: the base classes were the undefined placeholder `__a`; the mixins imported above are
# the intended bases. Methods were also all named `lowercase_`, so later defs shadowed earlier
# ones and most tests never ran — restored descriptive, unique names.
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for a fast CPU pipeline."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,  # 4 latent + 4 masked-image latent + 1 mask channel
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Build a deterministic prompt/image/mask input dict for `device`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # BUGFIX: `np.uinta` does not exist; the intended dtype constructor is np.uint8.
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # free VRAM between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        # BUGFIX: `torch.floataa` does not exist; half precision is torch.float16.
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 tolerance is deliberately loose
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
75
1
"""TensorFlow activation functions and the name -> callable lookup used by models."""

import math

import tensorflow as tf
from packaging import version


# BUGFIX: every function below was defined under the single name `a__`, shadowing the previous
# one, while the module body referenced `_gelu`, `_gelu_new`, `gelu_fast`, `glu`, `mish`,
# `quick_gelu` and `ACTaFN` — all undefined (NameError on import). Restored consistent names;
# backward-compatible aliases for the obfuscated public names are kept at the bottom.


def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * Phi(x) via erf."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the "new" variant used by GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Faster polynomial/tanh approximation of GELU."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)  # sqrt(2/pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to [-10, 10] (for quantization-friendly ranges)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis`, gate the first half by sigmoid
    of the second. The original unpacked both split halves into one name and gated by
    the *input*, which could never run (the gate operand `a` was undefined)."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # BUGFIX: the original passed `approximate=<the input tensor>`; the Keras flag
        # selecting the tanh approximation must be a boolean.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Return the activation callable registered under `activation_string`.

    Raises:
        KeyError: if the name is not in the mapping.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""")


# Backward-compatible aliases for the obfuscated public names used by existing callers:
# `UpperCamelCase__`'s final binding was the activation mapping, and the last `a__`
# definition was the getter.
UpperCamelCase__ = ACTaFN
a__ = get_tf_activation
75
"""Tests for the Flax Blenderbot model."""

import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # BUGFIX: the original bound "platform" to a throwaway module variable, so the
    # allocator described by the comment above was never actually configured.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


# BUGFIX: this helper was defined as `a__` but called below by its real name,
# `prepare_blenderbot_inputs_dict` (NameError) — restored the real name.
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Derive any missing masks from the pad token and return the model input dict."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    # NOTE(review): the encoder attention_mask is reused as the decoder mask here,
    # matching the original return value; the computed decoder_attention_mask is unused.
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


# BUGFIX: the tester class was named `lowerCamelCase_` but is instantiated below as
# `FlaxBlenderbotModelTester`; its methods were all `lowercase_`, so later definitions
# shadowed earlier ones — restored unique, descriptive names.
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # keep ids >= 3 so the special eos/pad/bos ids never appear mid-sequence,
        # then force an eos token at the end of every row
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental (cached) decoding must match a plain full-sequence decode."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # extend the mask with zeros up to the cache length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # shifting consumes exactly one pad token
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


# BUGFIX: the base classes were undefined `__a` placeholders; the mixins imported at the
# top of the file are the intended bases.
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        tokenizer_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        # NOTE(review): the obfuscated original passed the whole generate() output object to
        # batch_decode; decoding needs the token ids, i.e. the `.sequences` field — confirm.
        generated_ids = model.generate(**model_inputs, **generate_kwargs).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **tokenizer_kwargs)
        assert generated_txt[0].strip() == tgt_text
75
1
"""Project Euler 14: find the start below a limit that produces the longest Collatz chain."""


def a__(lowerCAmelCase__: int = 1_000_000) -> int:
    """Return the starting number below ``lowerCAmelCase__`` with the longest Collatz chain.

    Chain lengths are memoized in ``counters`` so shared chain tails are only
    walked once (e.g. once the length for 2 is known, every chain reaching 2
    stops immediately and adds the cached length).
    """
    largest_number = 1
    pre_counter = 1  # longest chain length found so far
    counters = {1: 1}  # start -> chain length, seeded with the terminal value

    for start in range(2, lowerCAmelCase__):
        counter = 0
        number = start
        while True:
            if number in counters:
                # reuse the already-computed tail length and stop walking
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
            else:
                number = (3 * number) + 1
            counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


# BUGFIX: the __main__ guard below calls ``solution`` but the function is named ``a__``
# (NameError at runtime). Provide the conventional name as a backward-compatible alias.
solution = a__

if __name__ == "__main__":
    print(solution(int(input().strip())))
75
"""Tests for the Apache Beam based dataset builder."""

import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


# BUGFIX: both builder classes were named `lowerCamelCase_` (the second shadowing the first)
# while the tests instantiate `DummyBeamDataset` / `NestedBeamDataset` (NameError). Their hook
# methods were all named `lowercase_`, but the datasets builder API dispatches to `_info`,
# `_split_generators` and `_build_pcollection` — restored the required names.
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a single string feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested sequence feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Three (key, example) pairs for the flat dummy dataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Three (key, example) pairs for the nested dummy dataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # BUGFIX: the original bound the partial to a local variable, so the mock
                # never delegated to the real writer and no shards were produced; wire it
                # through side_effect while forcing two shards.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # BUGFIX: the original asserted the 00000 shard twice; the second check must
            # cover the 00001 shard.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # without a beam_runner, preparation must fail loudly
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
75
1
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = ConsistencyModelPipeline lowerCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowerCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt lowerCAmelCase__ = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def lowercase_ ( self : Optional[Any] , _A : int=False ): '''simple docstring''' if class_cond: UpperCAmelCase__ : Union[str, Any] = self.dummy_cond_unet else: UpperCAmelCase__ : Any = self.dummy_uncond_unet # Default to CM multistep sampler UpperCAmelCase__ : int = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) UpperCAmelCase__ : Any = { '''unet''': unet, '''scheduler''': scheduler, } return components def lowercase_ ( self : Tuple , _A : str , _A : List[Any]=0 ): '''simple docstring''' if str(_A 
).startswith('''mps''' ): UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(_A ) else: UpperCAmelCase__ : int = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase__ : Tuple = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : int = self.get_dummy_components() UpperCAmelCase__ : str = ConsistencyModelPipeline(**_A ) UpperCAmelCase__ : Optional[int] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(_A ) UpperCAmelCase__ : int = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase__ : Any = image[0, -3:, -3:, -1] UpperCAmelCase__ : int = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[str] = self.get_dummy_components(class_cond=_A ) UpperCAmelCase__ : List[Any] = ConsistencyModelPipeline(**_A ) UpperCAmelCase__ : str = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : List[str] = self.get_dummy_inputs(_A ) UpperCAmelCase__ : Tuple = 0 UpperCAmelCase__ : Dict = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase__ : int = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = 
'''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase__ : Any = ConsistencyModelPipeline(**_A ) UpperCAmelCase__ : Optional[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(_A ) UpperCAmelCase__ : List[str] = 1 UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Any = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1] UpperCAmelCase__ : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : int = self.get_dummy_components(class_cond=_A ) UpperCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**_A ) UpperCAmelCase__ : int = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(_A ) UpperCAmelCase__ : Tuple = 1 UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : str = 0 UpperCAmelCase__ : str = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase__ : Tuple = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : List[Any] , _A : List[str]=0 , _A : int=False , _A : List[str]="cpu" , _A : Union[str, Any]=torch.floataa , _A : List[str]=(1, 3, 64, 64) ): '''simple docstring''' 
UpperCAmelCase__ : Tuple = torch.manual_seed(_A ) UpperCAmelCase__ : int = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: UpperCAmelCase__ : Any = self.get_fixed_latents(seed=_A , device=_A , dtype=_A , shape=_A ) UpperCAmelCase__ : Optional[Any] = latents return inputs def lowercase_ ( self : List[str] , _A : Tuple=0 , _A : int="cpu" , _A : Optional[int]=torch.floataa , _A : List[str]=(1, 3, 64, 64) ): '''simple docstring''' if type(_A ) == str: UpperCAmelCase__ : int = torch.device(_A ) UpperCAmelCase__ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase__ : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A ) return latents def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase__ : Dict = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) UpperCAmelCase__ : Any = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : str = self.get_inputs() UpperCAmelCase__ : List[Any] = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1] UpperCAmelCase__ : Optional[int] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase__ : Tuple = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) 
UpperCAmelCase__ : Any = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : str = self.get_inputs() UpperCAmelCase__ : Optional[Any] = 1 UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : str = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase__ : Optional[int] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) UpperCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Any = self.get_inputs(get_fixed_latents=_A , device=_A ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ): UpperCAmelCase__ : Tuple = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase__ : Tuple = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , 
sigma_max=8_0.0 , ) UpperCAmelCase__ : int = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Optional[Any] = self.get_inputs(get_fixed_latents=_A , device=_A ) UpperCAmelCase__ : Dict = 1 UpperCAmelCase__ : str = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ): UpperCAmelCase__ : Dict = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
75
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def a__ ( ) -> List[str]: UpperCAmelCase__ : Optional[int] = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: UpperCAmelCase__ : Any = get_sagemaker_input() else: UpperCAmelCase__ : List[str] = get_cluster_input() return config def a__ ( lowerCAmelCase__=None ) -> List[Any]: if subparsers is not None: UpperCAmelCase__ : Union[str, Any] = subparsers.add_parser('''config''' , description=lowerCAmelCase__ ) else: UpperCAmelCase__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCAmelCase__ ) parser.add_argument( '''--config_file''' , default=lowerCAmelCase__ , help=( '''The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase__ ) return parser def a__ ( lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : List[Any] = get_user_input() if args.config_file is not None: UpperCAmelCase__ : Any = args.config_file else: if not os.path.isdir(lowerCAmelCase__ ): os.makedirs(lowerCAmelCase__ ) UpperCAmelCase__ : int = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(lowerCAmelCase__ ) else: config.to_yaml_file(lowerCAmelCase__ ) print(F"""accelerate configuration saved at {config_file}""" ) def a__ ( ) -> str: UpperCAmelCase__ : Optional[int] = config_command_parser() UpperCAmelCase__ : Any = parser.parse_args() config_command(lowerCAmelCase__ ) if __name__ == "__main__": main()
75
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) UpperCAmelCase__ : str = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) sd_pipe.set_scheduler('''sample_euler''' ) UpperCAmelCase__ : Dict = '''A painting of a squirrel eating a burger''' UpperCAmelCase__ : Tuple = torch.manual_seed(0 ) UpperCAmelCase__ : str = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) UpperCAmelCase__ : int = output.images UpperCAmelCase__ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase__ : int = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) UpperCAmelCase__ : Union[str, Any] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) sd_pipe.set_scheduler('''sample_euler''' ) UpperCAmelCase__ : Optional[Any] = '''A painting of a squirrel eating a burger''' UpperCAmelCase__ : List[Any] = torch.manual_seed(0 ) UpperCAmelCase__ : int = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) UpperCAmelCase__ : Optional[int] = 
output.images UpperCAmelCase__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase__ : Dict = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) UpperCAmelCase__ : int = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) UpperCAmelCase__ : int = '''A painting of a squirrel eating a burger''' UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 ) UpperCAmelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=_A , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=_A , ) UpperCAmelCase__ : Union[str, Any] = output.images UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase__ : Dict = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
75
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: # Construct model if gpta_config_file == "": UpperCAmelCase__ : Optional[Any] = GPTaConfig() else: UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ ) # Load weights from numpy load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowerCAmelCase__ ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) UpperCamelCase__ = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
75
1
'''simple docstring''' import os import sys UpperCamelCase__ = os.path.join(os.path.dirname(__file__), '''src''') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) UpperCamelCase__ = [ '''torch''', '''numpy''', '''tokenizers''', '''filelock''', '''requests''', '''tqdm''', '''regex''', '''sentencepiece''', '''sacremoses''', '''importlib_metadata''', '''huggingface_hub''', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict: return AutoConfig.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any: return AutoTokenizer.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int: return AutoModel.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]: return AutoModelForCausalLM.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int: return AutoModelForMaskedLM.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str: return AutoModelForSequenceClassification.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]: return AutoModelForQuestionAnswering.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )
75
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase_ : def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : str = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : List[str] = use_input_mask UpperCAmelCase__ : Tuple = use_token_type_ids 
UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Any = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : List[str] = coordinate_size UpperCAmelCase__ : Tuple = shape_size UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[Any] = num_choices UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase__ : str = text_seq_length UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1 UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCAmelCase__ : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase__ : str = bbox[i, j, 3] UpperCAmelCase__ : Dict = bbox[i, j, 1] UpperCAmelCase__ : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase__ : Optional[int] = bbox[i, j, 2] UpperCAmelCase__ : Any = bbox[i, j, 0] UpperCAmelCase__ : List[Any] = tmp_coordinate UpperCAmelCase__ : str = tf.constant(_A ) UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 
self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Any = None if self.use_input_mask: UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase__ : Any = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : List[str] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A ) # text + image UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A ) UpperCAmelCase__ : Tuple = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , ) UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) # text only UpperCAmelCase__ : Any = model(_A , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.num_labels UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A ) UpperCAmelCase__ : Union[str, Any] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.num_labels UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A ) UpperCAmelCase__ : Optional[int] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : str = 2 UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A ) UpperCAmelCase__ : str = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs UpperCAmelCase__ : List[Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ): '''simple docstring''' return True def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(_A ) if model_class in get_values(_A ): UpperCAmelCase__ : Tuple = { k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_A , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
elif model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = model_class(_A ) if getattr(_A , '''hf_compute_loss''' , _A ): # The number of elements in the loss should be the same as the number of elements in the label UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : List[Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0] ] UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) UpperCAmelCase__ : List[Any] = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy() if 
len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCAmelCase__ : Any = -100 UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A ) UpperCAmelCase__ : int = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Dict = model(_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) # Get keys that were added with the _prepare_for_class function UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys() UpperCAmelCase__ : int = inspect.signature(model.call ).parameters UpperCAmelCase__ : Union[str, Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCAmelCase__ : Dict = {0: '''input_ids'''} for label_key in label_keys: UpperCAmelCase__ : str = signature_names.index(_A ) UpperCAmelCase__ : List[Any] = label_key UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCAmelCase__ : Tuple = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCAmelCase__ : Any = prepared_for_class[value] UpperCAmelCase__ : Tuple = tuple(_A ) # Send to model UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase_ ( self : int ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ 
) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Union[str, Any] = type self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : List[str] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _A , _A , _A , _A , _A , _A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple 
docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> List[str]: UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Dict ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) UpperCAmelCase__ : Dict = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values UpperCAmelCase__ : str = tf.constant([[1, 2]] ) UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A ) # verify the logits UpperCAmelCase__ : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , _A ) UpperCAmelCase__ : Dict = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
75
1
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' UpperCamelCase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self : Optional[int] , _A : List[Any]=None , _A : Tuple=None , **_A : List[str] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _A , ) UpperCAmelCase__ : Optional[int] = kwargs.pop('''feature_extractor''' ) UpperCAmelCase__ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) UpperCAmelCase__ : Dict = tokenizer UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''gpt2''' ) UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(_A , _A ) def __call__( self : Dict , _A : Any=None , _A : Tuple=None , _A : Dict=None , **_A : int ): '''simple docstring''' if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: UpperCAmelCase__ : Any = self.image_processor(_A , return_tensors=_A , **_A ) if text is not None: UpperCAmelCase__ : Any = self.char_tokenizer(_A , return_tensors=_A , **_A ) if text is None: return inputs 
elif images is None: return encodings else: UpperCAmelCase__ : Any = encodings['''input_ids'''] return inputs def lowercase_ ( self : Optional[int] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = sequences UpperCAmelCase__ : Union[str, Any] = char_preds.size(0 ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self._decode_helper(_A , '''char''' ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._decode_helper(_A , '''bpe''' ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self._decode_helper(_A , '''wp''' ) UpperCAmelCase__ : str = [] UpperCAmelCase__ : Optional[int] = [] for i in range(_A ): UpperCAmelCase__ : Tuple = [char_scores[i], bpe_scores[i], wp_scores[i]] UpperCAmelCase__ : Tuple = [char_strs[i], bpe_strs[i], wp_strs[i]] UpperCAmelCase__ : int = scores.index(max(_A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) UpperCAmelCase__ : Any = {} UpperCAmelCase__ : Optional[int] = final_strs UpperCAmelCase__ : Dict = final_scores UpperCAmelCase__ : Optional[Any] = char_strs UpperCAmelCase__ : List[Any] = bpe_strs UpperCAmelCase__ : Dict = wp_strs return out def lowercase_ ( self : str , _A : Union[str, Any] , _A : Dict ): '''simple docstring''' if format == DecodeType.CHARACTER: UpperCAmelCase__ : List[str] = self.char_decode UpperCAmelCase__ : str = 1 UpperCAmelCase__ : Optional[Any] = '''[s]''' elif format == DecodeType.BPE: UpperCAmelCase__ : Optional[Any] = self.bpe_decode UpperCAmelCase__ : str = 2 UpperCAmelCase__ : str = '''#''' elif format == DecodeType.WORDPIECE: UpperCAmelCase__ : Union[str, Any] = self.wp_decode UpperCAmelCase__ : Dict = 102 UpperCAmelCase__ : int = '''[SEP]''' else: raise ValueError(f"""Format {format} is not supported.""" ) UpperCAmelCase__ , UpperCAmelCase__ : str = [], [] UpperCAmelCase__ : int = pred_logits.size(0 ) UpperCAmelCase__ : Tuple = pred_logits.size(1 ) UpperCAmelCase__ , UpperCAmelCase__ : 
List[Any] = pred_logits.topk(1 , dim=-1 , largest=_A , sorted=_A ) UpperCAmelCase__ : Tuple = preds_index.view(-1 , _A )[:, 1:] UpperCAmelCase__ : Optional[int] = decoder(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = torch.nn.functional.softmax(_A , dim=2 ).max(dim=2 ) UpperCAmelCase__ : Optional[Any] = preds_max_prob[:, 1:] for index in range(_A ): UpperCAmelCase__ : Optional[int] = preds_str[index].find(_A ) UpperCAmelCase__ : Tuple = preds_str[index][:pred_eos] UpperCAmelCase__ : Dict = preds_index[index].cpu().tolist() UpperCAmelCase__ : str = pred_index.index(_A ) if eos_token in pred_index else -1 UpperCAmelCase__ : List[str] = preds_max_prob[index][: pred_eos_index + 1] UpperCAmelCase__ : Tuple = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_A ) conf_scores.append(_A ) return dec_strs, conf_scores def lowercase_ ( self : Any , _A : int ): '''simple docstring''' UpperCAmelCase__ : Dict = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(_A )] return decode_strs def lowercase_ ( self : str , _A : List[Any] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(_A ) def lowercase_ ( self : Union[str, Any] , _A : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(_A )] return decode_strs
75
'''simple docstring''' import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100""" UpperCamelCase__ = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: UpperCamelCase__ = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: UpperCamelCase__ = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
75
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): def __init__( self : Union[str, Any] , *_A : Any , **_A : int ): '''simple docstring''' warnings.warn( '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use FlavaImageProcessor instead.''' , _A , ) super().__init__(*_A , **_A )
75
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray: UpperCAmelCase__ : List[str] = cva.getAffineTransform(lowerCAmelCase__ , lowerCAmelCase__ ) return cva.warpAffine(lowerCAmelCase__ , lowerCAmelCase__ , (rows, cols) ) if __name__ == "__main__": # read original image UpperCamelCase__ = cva.imread( str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''') ) # turn image in gray scale value UpperCamelCase__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape UpperCamelCase__ , UpperCamelCase__ = gray_img.shape # set different points to rotate image UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa) UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa) UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa) UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa) # add all rotated images in a list UpperCamelCase__ = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations UpperCamelCase__ = plt.figure(1) UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3'''] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''') plt.title(titles[i]) plt.axis('''off''') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
75
1
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'AutoTokenizer' lowerCAmelCase__ = ['tokenizer'] lowerCAmelCase__ = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__( self : str , _A : Tuple , _A : List[str]=None ): '''simple docstring''' super().__init__(_A ) UpperCAmelCase__ : str = speaker_embeddings @classmethod def lowercase_ ( cls : Union[str, Any] , _A : Any , _A : List[str]="speaker_embeddings_path.json" , **_A : List[str] ): '''simple docstring''' if speaker_embeddings_dict_path is not None: UpperCAmelCase__ : List[Any] = get_file_from_repo( _A , _A , subfolder=kwargs.pop('''subfolder''' , _A ) , cache_dir=kwargs.pop('''cache_dir''' , _A ) , force_download=kwargs.pop('''force_download''' , _A ) , proxies=kwargs.pop('''proxies''' , _A ) , resume_download=kwargs.pop('''resume_download''' , _A ) , local_files_only=kwargs.pop('''local_files_only''' , _A ) , use_auth_token=kwargs.pop('''use_auth_token''' , _A ) , revision=kwargs.pop('''revision''' , _A ) , ) if speaker_embeddings_path is None: logger.warning( f"""`{os.path.join(_A , _A )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" ) UpperCAmelCase__ : List[str] = None else: with open(_A ) as speaker_embeddings_json: UpperCAmelCase__ : Any = json.load(_A ) else: UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , **_A ) return cls(tokenizer=_A , speaker_embeddings=_A ) def lowercase_ ( self : Any , _A : Tuple , _A : Tuple="speaker_embeddings_path.json" 
, _A : Union[str, Any]="speaker_embeddings" , _A : bool = False , **_A : Union[str, Any] , ): '''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(_A , _A , '''v2''' ) , exist_ok=_A ) UpperCAmelCase__ : str = {} UpperCAmelCase__ : Any = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": UpperCAmelCase__ : Union[str, Any] = self._load_voice_preset(_A ) UpperCAmelCase__ : Optional[int] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , _A , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=_A , ) UpperCAmelCase__ : Union[str, Any] = os.path.join(_A , f"""{prompt_key}_{key}.npy""" ) UpperCAmelCase__ : Optional[int] = tmp_dict with open(os.path.join(_A , _A ) , '''w''' ) as fp: json.dump(_A , _A ) super().save_pretrained(_A , _A , **_A ) def lowercase_ ( self : Optional[Any] , _A : str = None , **_A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.speaker_embeddings[voice_preset] UpperCAmelCase__ : int = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" ) UpperCAmelCase__ : List[Any] = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _A ) , cache_dir=kwargs.pop('''cache_dir''' , _A ) , force_download=kwargs.pop('''force_download''' , _A ) , proxies=kwargs.pop('''proxies''' , _A ) , resume_download=kwargs.pop('''resume_download''' , _A ) , local_files_only=kwargs.pop('''local_files_only''' , _A ) , use_auth_token=kwargs.pop('''use_auth_token''' , _A ) , revision=kwargs.pop('''revision''' , _A ) , ) if path is None: raise ValueError( f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] 
)}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.""" ) UpperCAmelCase__ : List[Any] = np.load(_A ) return voice_preset_dict def lowercase_ ( self : str , _A : Optional[dict] = None ): '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) def __call__( self : int , _A : Optional[Any]=None , _A : Dict=None , _A : int="pt" , _A : Any=256 , _A : int=False , _A : str=True , _A : Union[str, Any]=False , **_A : Tuple , ): '''simple docstring''' if voice_preset is not None and not isinstance(_A , _A ): if ( isinstance(_A , _A ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): UpperCAmelCase__ : Dict = self._load_voice_preset(_A ) else: if isinstance(_A , _A ) and not voice_preset.endswith('''.npz''' ): UpperCAmelCase__ : Optional[Any] = voice_preset + '''.npz''' UpperCAmelCase__ : Tuple = np.load(_A ) if voice_preset is not None: self._validate_voice_preset_dict(_A , **_A ) UpperCAmelCase__ : int = BatchFeature(data=_A , tensor_type=_A ) UpperCAmelCase__ : str = self.tokenizer( _A , return_tensors=_A , padding='''max_length''' , max_length=_A , return_attention_mask=_A , return_token_type_ids=_A , add_special_tokens=_A , **_A , ) if voice_preset is not None: UpperCAmelCase__ : int = voice_preset return encoded_text
75
'''simple docstring''' from datetime import datetime as dt import os from github import Github UpperCamelCase__ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def a__ ( ) -> List[str]: UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] ) UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' ) UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' ) for issue in open_issues: UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
75
1
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) def a__ ( lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Union[str, Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCAmelCase__ : List[Any] = 1_28 elif "12-12" in model_name: UpperCAmelCase__ : Dict = 12 UpperCAmelCase__ : str = 12 elif "14-14" in model_name: UpperCAmelCase__ : Dict = 14 UpperCAmelCase__ : Tuple = 14 elif "16-16" in model_name: UpperCAmelCase__ : List[str] = 16 UpperCAmelCase__ : Optional[Any] = 16 else: raise ValueError('''Model not supported''' ) UpperCAmelCase__ : Optional[int] = '''huggingface/label-files''' if "speech-commands" in model_name: UpperCAmelCase__ : List[Any] = 35 UpperCAmelCase__ : Optional[int] = '''speech-commands-v2-id2label.json''' else: UpperCAmelCase__ : List[Any] = 5_27 UpperCAmelCase__ : Union[str, Any] = '''audioset-id2label.json''' UpperCAmelCase__ : Dict = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase__ : Optional[Any] = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} UpperCAmelCase__ : Optional[Any] = idalabel UpperCAmelCase__ : str = {v: k for k, v in idalabel.items()} return config def a__ ( lowerCAmelCase__ ) -> Any: if "module.v" in name: UpperCAmelCase__ : Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: UpperCAmelCase__ : Dict = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: UpperCAmelCase__ : int = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: UpperCAmelCase__ : str = 
name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: UpperCAmelCase__ : Tuple = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase__ : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase__ : Optional[int] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase__ : List[str] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase__ : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase__ : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase__ : Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCAmelCase__ : str = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: UpperCAmelCase__ : Tuple = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: UpperCAmelCase__ : str = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: for key in orig_state_dict.copy().keys(): UpperCAmelCase__ : Dict = orig_state_dict.pop(lowerCAmelCase__ ) if "qkv" in key: UpperCAmelCase__ : Union[str, Any] = key.split('''.''' ) UpperCAmelCase__ : Dict = int(key_split[3] ) UpperCAmelCase__ : Dict = config.hidden_size if "weight" in key: UpperCAmelCase__ : Any = val[:dim, :] UpperCAmelCase__ : int = val[dim : dim * 2, :] UpperCAmelCase__ : List[Any] = val[-dim:, :] else: UpperCAmelCase__ : List[str] = val[:dim] UpperCAmelCase__ : 
Optional[Any] = val[dim : dim * 2] UpperCAmelCase__ : Union[str, Any] = val[-dim:] else: UpperCAmelCase__ : List[Any] = val return orig_state_dict def a__ ( lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : int = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[Any]: UpperCAmelCase__ : Tuple = get_audio_spectrogram_transformer_config(lowerCAmelCase__ ) UpperCAmelCase__ : Any = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict UpperCAmelCase__ : Any = model_name_to_url[model_name] UpperCAmelCase__ : str = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' ) # remove some keys remove_keys(lowerCAmelCase__ ) # rename some keys UpperCAmelCase__ : int = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) # load 🤗 model 
UpperCAmelCase__ : int = ASTForAudioClassification(lowerCAmelCase__ ) model.eval() model.load_state_dict(lowerCAmelCase__ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCAmelCase__ : Any = -4.2_6_7_7_3_9_3 if '''speech-commands''' not in model_name else -6.8_4_5_9_7_8 UpperCAmelCase__ : Union[str, Any] = 4.5_6_8_9_9_7_4 if '''speech-commands''' not in model_name else 5.5_6_5_4_5_2_6 UpperCAmelCase__ : int = 10_24 if '''speech-commands''' not in model_name else 1_28 UpperCAmelCase__ : Optional[Any] = ASTFeatureExtractor(mean=lowerCAmelCase__ , std=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) if "speech-commands" in model_name: UpperCAmelCase__ : Optional[Any] = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) UpperCAmelCase__ : str = dataset[0]['''audio''']['''array'''] else: UpperCAmelCase__ : Union[str, Any] = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) UpperCAmelCase__ , UpperCAmelCase__ : Any = torchaudio.load(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = waveform.squeeze().numpy() UpperCAmelCase__ : Tuple = feature_extractor(lowerCAmelCase__ , sampling_rate=1_60_00 , return_tensors='''pt''' ) # forward pass UpperCAmelCase__ : Tuple = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Any = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCAmelCase__ : Optional[int] = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCAmelCase__ : Union[str, Any] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCAmelCase__ : Tuple = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCAmelCase__ : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, 
-8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCAmelCase__ : Union[str, Any] = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCAmelCase__ : List[str] = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCAmelCase__ : str = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase__ ) print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F"""MIT/{model_name}""" ) feature_extractor.push_to_hub(F"""MIT/{model_name}""" ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''ast-finetuned-audioset-10-10-0.4593''', type=str, help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) UpperCamelCase__ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
75
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase_ ( __a ): def __init__( self : Dict , _A : List[str] , _A : int ): '''simple docstring''' super().__init__() self.register_modules(unet=_A , scheduler=_A ) @torch.no_grad() def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCAmelCase__ : List[Any] = int(_A ) if sample_size % down_scale_factor != 0: UpperCAmelCase__ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. 
It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" ''' process.''' ) UpperCAmelCase__ : Dict = int(_A ) UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(_A , _A ) and len(_A ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A ) # set step values self.scheduler.set_timesteps(_A , device=audio.device ) UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample # 2. compute previous image: x_t -> t_t-1 UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCAmelCase__ : Any = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_A )
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = { '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' from math import factorial def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float: if successes > trials: raise ValueError('''successes must be lower or equal to trials''' ) if trials < 0 or successes < 0: raise ValueError('''the function is defined for non-negative integers''' ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''the function is defined for non-negative integers''' ) if not 0 < prob < 1: raise ValueError('''prob has to be in range of 1 - 0''' ) UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) ) coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
75
1
"""Convert a fairseq XGLM checkpoint into a Hugging Face XGLMForCausalLM."""

import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only bookkeeping entries (in place) that have no HF equivalent."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free nn.Linear whose weight tensor is shared with *emb*."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint from *checkpoint_path* and return the HF model."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes decoder weights with "decoder."; HF uses "model.".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the lm_head is rebuilt below from the input embeddings.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
75
"""Image processor (LeViT-style preprocessing) reconstructed from obfuscated source.

NOTE(review): method names are grounded by the ``self.resize`` /
``self.center_crop`` / ``self.rescale`` / ``self.normalize`` call sites in
``preprocess``; the class name and ``model_input_names`` follow the
BaseImageProcessor convention -- confirm against the original file.
"""

from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class lowerCamelCase_(BaseImageProcessor):
    """Resize / center-crop / rescale / normalize pipeline producing pixel_values."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image*.

        If ``size`` has key ``"shortest_edge"``, the image is resized so its
        shortest edge becomes ``int((256 / 224) * shortest_edge)``, keeping the
        aspect ratio; otherwise ``size`` must provide ``"height"``/``"width"``.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop *image* to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* channel-wise with *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch of images.

        Per-call arguments override the instance defaults set in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
1
"""Index of the least-significant set bit of a non-negative integer."""

from math import log2


def lowest_set_bit_index(number: int) -> int:
    """Return the 0-based index of the least-significant set bit of *number*.

    ``number & -number`` isolates the lowest set bit; its log2 is the index.
    Returns 0 for ``number == 0`` (convention kept from the original code).

    Raises:
        ValueError: if *number* is negative.
        TypeError: if *number* is not an int.
    """
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        # NOTE(review): the obfuscated source had an unusable isinstance()
        # call here; a "not int" check matches the error message below.
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))


# Backward-compatible alias for the name used by the obfuscated original.
a__ = lowest_set_bit_index


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
"""Columnar transposition cipher (encrypt/decrypt by key)."""

import math


def main() -> None:
    """Interactively encrypt or decrypt a message with a transposition cipher."""
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    # NOTE(review): as in the original, `text` is unbound if the mode is
    # neither 'e' nor 'd' -- the print below would then raise NameError.
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Encrypt *message*: column *col* collects every *key*-th char from offset *col*."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Decrypt a message produced by ``encrypt_message`` with the same *key*.

    The ciphertext is written back into a grid of ``num_rows x num_cols``
    cells; ``num_shaded_boxes`` trailing cells of the last column are unused.
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        # Wrap to the next row at the last column, or one column early once
        # we reach the rows whose final cell is shaded (unused).
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
75
1
"""Lazy import structure for the Megatron-BERT model package."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Objects importable without torch.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

# Modeling classes require torch; register them only when it is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports are only visible to static type checkers.
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
'''simple docstring''' class lowerCamelCase_ : def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = name UpperCAmelCase__ : Union[str, Any] = val def __str__( self : Tuple ): '''simple docstring''' return f"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self : Union[str, Any] , _A : Dict ): '''simple docstring''' return self.val < other.val class lowerCamelCase_ : def __init__( self : int , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = {} UpperCAmelCase__ : int = {} UpperCAmelCase__ : Any = self.build_heap(_A ) def __getitem__( self : Any , _A : Any ): '''simple docstring''' return self.get_value(_A ) def lowercase_ ( self : Any , _A : List[Any] ): '''simple docstring''' return (idx - 1) // 2 def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ): '''simple docstring''' return idx * 2 + 1 def lowercase_ ( self : Tuple , _A : List[Any] ): '''simple docstring''' return idx * 2 + 2 def lowercase_ ( self : List[str] , _A : Tuple ): '''simple docstring''' return self.heap_dict[key] def lowercase_ ( self : str , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = len(_A ) - 1 UpperCAmelCase__ : Tuple = self.get_parent_idx(_A ) for idx, i in enumerate(_A ): UpperCAmelCase__ : Dict = idx UpperCAmelCase__ : Optional[Any] = i.val for i in range(_A , -1 , -1 ): self.sift_down(_A , _A ) return array def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ): '''simple docstring''' while True: UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741 UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A ) UpperCAmelCase__ : Tuple = idx if l < len(_A ) and array[l] < array[idx]: UpperCAmelCase__ : int = l if r < len(_A ) and array[r] < array[smallest]: UpperCAmelCase__ : Dict = r if smallest != idx: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx] ( ( UpperCAmelCase__ ) , ( 
UpperCAmelCase__ ) , ) : List[str] = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) UpperCAmelCase__ : str = smallest else: break def lowercase_ ( self : List[str] , _A : int ): '''simple docstring''' UpperCAmelCase__ : str = self.get_parent_idx(_A ) while p >= 0 and self.heap[p] > self.heap[idx]: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p] UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) UpperCAmelCase__ : Union[str, Any] = p UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' return self.heap[0] def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0] UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) UpperCAmelCase__ : int = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowercase_ ( self : int , _A : Union[str, Any] ): '''simple docstring''' self.heap.append(_A ) UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1 UpperCAmelCase__ : Optional[Any] = node.val self.sift_up(len(self.heap ) - 1 ) def lowercase_ ( self : str ): '''simple docstring''' return len(self.heap ) == 0 def lowercase_ ( self : int , _A : Optional[Any] , _A : str ): '''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" UpperCAmelCase__ : Optional[Any] = new_value UpperCAmelCase__ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) UpperCamelCase__ = Node('''R''', -1) UpperCamelCase__ = Node('''B''', 6) UpperCamelCase__ = Node('''A''', 3) UpperCamelCase__ = Node('''X''', 1) UpperCamelCase__ = Node('''E''', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array 
UpperCamelCase__ = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('''Min Heap - before decrease key''') for i in my_min_heap.heap: print(i) print('''Min Heap - After decrease key of node [B -> -17]''') my_min_heap.decrease_key(b, -1_7) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
75
1
"""Canonical pipeline-call argument sets shared by pipeline tests.

NOTE(review): the obfuscated source assigned every frozenset to one and the
same identifier, shadowing all but the last. The constant names below were
reconstructed from the standard ``pipeline_params.py`` naming convention --
confirm against the original file before relying on them.
"""

TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
75
"""Check that every config class docstring references a valid HF Hub checkpoint."""

import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose markdown link in the class's
    source is self-consistent (link URL matches the checkpoint name), or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-exempt config class without a valid
    checkpoint reference in its docstring."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
75
1
"""Flax BeiT model tests.

NOTE(review): reconstructed from obfuscated source. Tester/helper names
(`FlaxBeitModelTester`, `prepare_img`, `create_and_check_*`) are grounded by
in-source call sites; `test_*` method names and the two non-tester class
names were reconstructed and should be confirmed against the original file.
"""

import inspect
import unittest

import numpy as np

from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class FlaxBeitModelTester(unittest.TestCase):
    """Builds small BeiT configs/inputs and shape-checks each model head."""

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        # The [CLS] token is dropped from the masked-image-modeling logits.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


def prepare_img():
    """Load the COCO test fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8_192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1_000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21_841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
75
"""Dummy placeholder object for environments missing torch/torchsde."""

from ..utils import DummyObject, requires_backends


class lowerCamelCase_(metaclass=DummyObject):
    # NOTE(review): `_backends` follows the DummyObject contract used by the
    # auto-generated dummy-object files -- confirm against the original.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError naming the missing backends.
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
75
1
"""Project Euler 25: index of the first Fibonacci term with n digits."""


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term that contains *n* digits.

    Uses F(1) = F(2) = 1; e.g. the first term with 3 digits is F(12) = 144.
    """
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        # Count the digits of the new term.
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
75
"""CTRL model configuration (reconstructed from obfuscated source)."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): constant name reconstructed per the usual
# <MODEL>_PRETRAINED_CONFIG_ARCHIVE_MAP convention -- confirm against upstream.
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class lowerCamelCase_(PretrainedConfig):
    """Configuration class for a CTRL-style model.

    NOTE(review): the three class attributes were mangled into one shadowed
    name in the source; `model_type` / `keys_to_ignore_at_inference` /
    `attribute_map` follow the PretrainedConfig contract.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """Store the hyper-parameters; extra kwargs go to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
75
1
'''simple docstring''' import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: UpperCAmelCase__ : str = TOKENIZER_CLASSES else: UpperCAmelCase__ : str = {tokenizer_name: getattr(lowerCAmelCase__ , tokenizer_name + '''Fast''' )} logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: UpperCAmelCase__ : Tuple = TOKENIZER_CLASSES[tokenizer_name] UpperCAmelCase__ : str = True if checkpoint_name is None: UpperCAmelCase__ : str = list(tokenizer_class.max_model_input_sizes.keys() ) else: UpperCAmelCase__ : Tuple = [checkpoint_name] logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained(lowerCAmelCase__ , force_download=lowerCAmelCase__ ) # Save fast tokenizer logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: UpperCAmelCase__ , UpperCAmelCase__ : Dict = checkpoint.split('''/''' ) UpperCAmelCase__ : List[Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) elif add_prefix: UpperCAmelCase__ : Tuple = checkpoint UpperCAmelCase__ : Any = dump_path else: UpperCAmelCase__ : Optional[int] = None 
UpperCAmelCase__ : Dict = dump_path logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: UpperCAmelCase__ : Union[str, Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] UpperCAmelCase__ : Optional[int] = file_path.split(lowerCAmelCase__ )[-1][0] if next_char == "/": UpperCAmelCase__ : int = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = None logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) UpperCAmelCase__ : Optional[Any] = tokenizer.save_pretrained( lowerCAmelCase__ , legacy_format=lowerCAmelCase__ , filename_prefix=lowerCAmelCase__ ) logger.info(F"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('''tokenizer.json''' ): os.remove(lowerCAmelCase__ ) logger.info(F"""=> removing {file_name}""" ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """ '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) UpperCamelCase__ = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
75
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[str] ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def lowercase_ ( self : List[Any] , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script 
, source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , ) def lowercase_ ( self : Optional[int] , _A : Any ): '''simple docstring''' TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def lowercase_ ( self : Optional[int] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A ) # run training estimator.fit() # result dataframe UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase__ : Any = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
75
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'ctrl' lowerCAmelCase__ = ['past_key_values'] lowerCAmelCase__ = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Any = n_positions UpperCAmelCase__ : Optional[Any] = n_embd UpperCAmelCase__ : List[str] = n_layer UpperCAmelCase__ : Any = n_head UpperCAmelCase__ : int = dff UpperCAmelCase__ : str = resid_pdrop UpperCAmelCase__ : Tuple = embd_pdrop UpperCAmelCase__ : int = layer_norm_epsilon UpperCAmelCase__ : Tuple = initializer_range UpperCAmelCase__ : Union[str, Any] = use_cache super().__init__(**_A )
75
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets UpperCamelCase__ = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' UpperCamelCase__ = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' UpperCamelCase__ = R''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , ) def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 0.0 for i, j in zip(_A , _A ): n_correct += 1.0 if 
math_equivalence.is_equiv(_A , _A ) else 0.0 UpperCAmelCase__ : Dict = n_correct / len(_A ) return { "accuracy": accuracy, }
75
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'deta' lowerCAmelCase__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Dict , _A : str=None , _A : Any=900 , _A : str=2_048 , _A : str=6 , _A : Optional[Any]=2_048 , _A : int=8 , _A : List[Any]=6 , _A : Optional[Any]=1_024 , _A : Tuple=8 , _A : Optional[int]=0.0 , _A : Optional[Any]=True , _A : Union[str, Any]="relu" , _A : List[str]=256 , _A : str=0.1 , _A : Union[str, Any]=0.0 , _A : List[str]=0.0 , _A : int=0.0_2 , _A : Union[str, Any]=1.0 , _A : Dict=True , _A : Union[str, Any]=False , _A : Dict="sine" , _A : Dict=5 , _A : Optional[Any]=4 , _A : Tuple=4 , _A : Optional[Any]=True , _A : Dict=300 , _A : str=True , _A : Union[str, Any]=True , _A : Tuple=1 , _A : List[str]=5 , _A : List[str]=2 , _A : List[Any]=1 , _A : List[Any]=1 , _A : Union[str, Any]=5 , _A : str=2 , _A : Optional[Any]=0.1 , _A : Tuple=0.2_5 , **_A : int , ): '''simple docstring''' if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) UpperCAmelCase__ : Optional[int] = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(_A , _A ): UpperCAmelCase__ : str = backbone_config.pop('''model_type''' ) UpperCAmelCase__ : str = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase__ : Tuple = config_class.from_dict(_A ) UpperCAmelCase__ : List[str] = backbone_config UpperCAmelCase__ : List[str] = num_queries UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = d_model UpperCAmelCase__ : Any = encoder_ffn_dim UpperCAmelCase__ : str = encoder_layers UpperCAmelCase__ : List[Any] = encoder_attention_heads UpperCAmelCase__ : Dict = decoder_ffn_dim UpperCAmelCase__ : List[Any] = decoder_layers UpperCAmelCase__ : Union[str, Any] = decoder_attention_heads UpperCAmelCase__ : Optional[int] = dropout UpperCAmelCase__ : Dict = attention_dropout UpperCAmelCase__ : Any = activation_dropout UpperCAmelCase__ : Tuple = activation_function UpperCAmelCase__ : Dict = init_std UpperCAmelCase__ : Tuple = init_xavier_std UpperCAmelCase__ : str = encoder_layerdrop UpperCAmelCase__ : Optional[int] = auxiliary_loss UpperCAmelCase__ : Dict = position_embedding_type # deformable attributes UpperCAmelCase__ : Tuple = num_feature_levels UpperCAmelCase__ : Optional[int] = encoder_n_points UpperCAmelCase__ : Optional[Any] = decoder_n_points UpperCAmelCase__ : List[str] = two_stage UpperCAmelCase__ : List[Any] = two_stage_num_proposals UpperCAmelCase__ : List[str] = with_box_refine UpperCAmelCase__ : Any = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher UpperCAmelCase__ : Union[str, Any] = class_cost UpperCAmelCase__ : Tuple = bbox_cost UpperCAmelCase__ : Any = giou_cost # Loss coefficients UpperCAmelCase__ : List[str] = mask_loss_coefficient UpperCAmelCase__ : List[Any] = 
dice_loss_coefficient UpperCAmelCase__ : Any = bbox_loss_coefficient UpperCAmelCase__ : Union[str, Any] = giou_loss_coefficient UpperCAmelCase__ : Any = eos_coefficient UpperCAmelCase__ : Tuple = focal_alpha super().__init__(is_encoder_decoder=_A , **_A ) @property def lowercase_ ( self : Any ): '''simple docstring''' return self.encoder_attention_heads @property def lowercase_ ( self : str ): '''simple docstring''' return self.d_model def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.backbone_config.to_dict() UpperCAmelCase__ : str = self.__class__.model_type return output
75
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': 
'''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } UpperCamelCase__ = { '''facebook/bart-base''': 1_0_2_4, '''facebook/bart-large''': 1_0_2_4, '''facebook/bart-large-mnli''': 1_0_2_4, '''facebook/bart-large-cnn''': 1_0_2_4, '''facebook/bart-large-xsum''': 1_0_2_4, '''yjernite/bart_eli5''': 1_0_2_4, } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = BartTokenizer def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ): '''simple docstring''' super().__init__( _A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , ) UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) ) UpperCAmelCase__ 
: Any = add_prefix_space UpperCAmelCase__ : str = pre_tok_class(**_A ) UpperCAmelCase__ : Dict = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase__ : Optional[Any] = '''post_processor''' UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A ) if tokenizer_component_instance: UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] ) if "cls" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] ) UpperCAmelCase__ : Dict = False if state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : Union[str, Any] = add_prefix_space UpperCAmelCase__ : Dict = True if state.get('''trim_offsets''' , _A ) != trim_offsets: UpperCAmelCase__ : List[Any] = trim_offsets UpperCAmelCase__ : List[Any] = True if changes_to_apply: UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) ) UpperCAmelCase__ : Union[str, Any] = component_class(**_A ) setattr(self.backend_tokenizer , _A , _A ) @property def lowercase_ ( self : Dict ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value UpperCAmelCase__ : str = value def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) 
return super()._batch_encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A ) return tuple(_A ) def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ): '''simple docstring''' UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] UpperCAmelCase__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
75
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = tempfile.mkdtemp() UpperCAmelCase__ : Optional[int] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] UpperCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) UpperCAmelCase__ : Any = { '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], '''do_convert_rgb''': True, } UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def lowercase_ ( self : Optional[Any] , **_A : Optional[Any] ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : int , **_A : str ): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **_A ) def 
lowercase_ ( self : Dict , **_A : Any ): '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase__ : str = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : Tuple = self.get_rust_tokenizer() UpperCAmelCase__ : Tuple = self.get_image_processor() UpperCAmelCase__ : List[Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_A ) UpperCAmelCase__ : List[Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _A ) self.assertIsInstance(processor_fast.tokenizer , _A ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _A ) self.assertIsInstance(processor_fast.image_processor , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , 
image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Optional[Any] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' ) UpperCAmelCase__ : Any = self.get_image_processor(do_normalize=_A ) UpperCAmelCase__ : str = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_A ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_image_processor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : str = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : Any = self.prepare_image_inputs() UpperCAmelCase__ : Optional[int] = image_processor(_A , return_tensors='''np''' ) UpperCAmelCase__ : Optional[int] = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : str = '''Alexandra,T-shirt的价格是15便士。''' UpperCAmelCase__ : List[str] = processor(text=_A ) UpperCAmelCase__ : Any = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_image_processor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : int = 
ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : Any = '''Alexandra,T-shirt的价格是15便士。''' UpperCAmelCase__ : int = self.prepare_image_inputs() UpperCAmelCase__ : Tuple = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : List[str] = self.get_tokenizer() UpperCAmelCase__ : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ : List[Any] = processor.batch_decode(_A ) UpperCAmelCase__ : Tuple = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_image_processor() UpperCAmelCase__ : List[str] = self.get_tokenizer() UpperCAmelCase__ : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : int = '''Alexandra,T-shirt的价格是15便士。''' UpperCAmelCase__ : Optional[Any] = self.prepare_image_inputs() UpperCAmelCase__ : Tuple = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
75
'''simple docstring''' import random from typing import Any def a__ ( lowerCAmelCase__ ) -> list[Any]: for _ in range(len(lowerCAmelCase__ ) ): UpperCAmelCase__ : int = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ : Optional[int] = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = data[b], data[a] return data if __name__ == "__main__": UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7] UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
75
1
'''Image processor for a CLIP-style model: resize -> center-crop -> rescale -> normalize -> batch.

NOTE(review): this copy looks machine-mangled.  Every method is named
``lowercase_`` (each later ``def`` shadows the earlier ones on the class), all
parameters are named ``_A`` (duplicate parameter names are a SyntaxError in
Python), and the method bodies read names such as ``size`` / ``do_resize`` /
``images`` that are never bound because every assignment target was rewritten
to ``UpperCAmelCase__``.  The code is preserved byte-for-byte below; the
comments record only the evident intent, to be confirmed against the
un-mangled upstream source.
'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


# Module-level logger (name mangled from the conventional ``logger``).
UpperCamelCase__ = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class lowerCamelCase_ ( __a ):
    # Output key produced by preprocessing (see the BatchFeature built at the end).
    lowerCAmelCase__ = ['pixel_values']

    def __init__( self : Any , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = True , **_A : List[Any] , ):
        '''Store preprocessing defaults; size defaults to a 224 shortest edge and
        crop_size to 224x224, with OPENAI_CLIP_MEAN/STD as normalization defaults.'''
        super().__init__(**_A )
        UpperCAmelCase__ : Optional[int] = size if size is not None else {'''shortest_edge''': 224}
        UpperCAmelCase__ : Union[str, Any] = get_size_dict(_A , default_to_square=_A )
        UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        UpperCAmelCase__ : Optional[Any] = get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' )
        UpperCAmelCase__ : int = do_resize
        UpperCAmelCase__ : Tuple = size
        UpperCAmelCase__ : Union[str, Any] = resample
        UpperCAmelCase__ : int = do_center_crop
        UpperCAmelCase__ : str = crop_size
        UpperCAmelCase__ : Union[str, Any] = do_rescale
        UpperCAmelCase__ : Optional[Any] = rescale_factor
        UpperCAmelCase__ : List[str] = do_normalize
        UpperCAmelCase__ : List[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        UpperCAmelCase__ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
        UpperCAmelCase__ : Dict = do_convert_rgb

    def lowercase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ):
        '''Resize so the shortest edge matches size["shortest_edge"]; requires that key.'''
        UpperCAmelCase__ : Any = get_size_dict(_A , default_to_square=_A )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        UpperCAmelCase__ : List[Any] = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
        return resize(_A , size=_A , resample=_A , data_format=_A , **_A )

    def lowercase_ ( self : Any , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ):
        '''Center-crop to (size["height"], size["width"]); requires both keys.'''
        UpperCAmelCase__ : Union[str, Any] = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )

    def lowercase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ):
        '''Multiply pixel values by a scale factor (typically 1/255).'''
        return rescale(_A , scale=_A , data_format=_A , **_A )

    def lowercase_ ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
        '''Normalize with the given per-channel mean and std.'''
        return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )

    def lowercase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : int = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , **_A : int , ):
        '''Full preprocessing pipeline: validate inputs, optionally convert to RGB,
        then resize / center-crop / rescale / normalize each image, convert to the
        requested channel-dimension format and wrap in a BatchFeature.
        Per-call arguments override the instance defaults stored in __init__.'''
        UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase__ : Optional[int] = size if size is not None else self.size
        UpperCAmelCase__ : List[Any] = get_size_dict(_A , param_name='''size''' , default_to_square=_A )
        UpperCAmelCase__ : str = resample if resample is not None else self.resample
        UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase__ : Dict = get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A )
        UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase__ : Tuple = image_std if image_std is not None else self.image_std
        UpperCAmelCase__ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        UpperCAmelCase__ : int = make_list_of_images(_A )
        if not valid_images(_A ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # Each enabled step requires its matching parameters to be resolvable.
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            UpperCAmelCase__ : str = [convert_to_rgb(_A ) for image in images]
        # All transformations expect numpy arrays.
        UpperCAmelCase__ : Dict = [to_numpy_array(_A ) for image in images]
        if do_resize:
            UpperCAmelCase__ : Optional[Any] = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
        if do_center_crop:
            UpperCAmelCase__ : List[Any] = [self.center_crop(image=_A , size=_A ) for image in images]
        if do_rescale:
            UpperCAmelCase__ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images]
        if do_normalize:
            UpperCAmelCase__ : Union[str, Any] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
        UpperCAmelCase__ : List[Any] = [to_channel_dimension_format(_A , _A ) for image in images]
        UpperCAmelCase__ : Tuple = {'''pixel_values''': images}
        return BatchFeature(data=_A , tensor_type=_A )
75
'''Segmented Sieve of Eratosthenes: list all primes up to a limit.'''
import math


def a__ ( lowerCAmelCase__ ) -> list[int]:
    """Return all prime numbers <= ``lowerCAmelCase__`` (the limit), in ascending order.

    Uses a segmented sieve: first a plain sieve up to sqrt(limit) produces the
    base primes, then the range (sqrt(limit), limit] is processed in segments
    of width ~sqrt(limit), keeping memory at O(sqrt(limit)).

    Fixes vs. the previous revision (which was corrupted by an automated
    rename): the base sieve appended the *limit* instead of the found prime,
    sieve strides used the limit instead of the prime, and the subscript
    assignments ``temp[...] = False`` had been destroyed entirely.
    """
    n = lowerCAmelCase__
    if n < 2:  # no primes below 2
        return []

    # Base sieve over [2, sqrt(n)].
    end = int(math.sqrt(n))
    temp = [True] * (end + 1)
    in_prime = []
    for start in range(2, end + 1):
        if temp[start]:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
    prime = list(in_prime)

    # Sieve each segment [low, high] using the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` that is >= low.
            t = (low // each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j, is_prime in enumerate(temp):
            if is_prime:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


if __name__ == "__main__":
    # Guarded so importing this module has no side effects; the previous
    # unguarded call also referenced the old name `sieve` (NameError).
    print(a__(10**6))
75
1
'''Utility script: sort the ``_import_structure`` entries of every diffusers ``__init__.py``.

NOTE(review): this copy looks machine-mangled.  Every function below is named
``a__`` (each ``def`` shadows the previous one at module level) while the
bodies still call the original names (``get_indent``,
``split_code_in_indented_blocks``, ``sort_objects``, ``sort_imports`` ...),
several signatures repeat the parameter name ``lowerCAmelCase__`` (a
SyntaxError), and assignment targets were collapsed to ``UpperCAmelCase__`` so
later reads (``search``, ``code``, ``lines`` ...) are unbound.  The code is
preserved byte-for-byte; comments describe the evident intent only.
'''
import argparse
import os
import re


# Root of the package whose __init__.py files are processed.
UpperCamelCase__ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
UpperCamelCase__ = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase__ = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase__ = re.compile(R'''\[([^\]]+)\]''')


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    '''Return the leading whitespace of a line ("" for an empty line).'''
    UpperCAmelCase__ : Optional[int] = _re_indent.search(lowerCAmelCase__ )
    # NOTE(review): `search` is never bound above (mangled assignment target).
    return "" if search is None else search.groups()[0]


def a__ ( lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> List[Any]:
    '''Split source text into blocks at the given indentation level, optionally
    bounded by start_prompt / end_prompt lines; returns the list of blocks.'''
    UpperCAmelCase__ : int = 0
    UpperCAmelCase__ : Union[str, Any] = code.split('''\n''' )
    if start_prompt is not None:
        # Everything before the start prompt becomes one opening block.
        while not lines[index].startswith(lowerCAmelCase__ ):
            index += 1
        UpperCAmelCase__ : Any = ['''\n'''.join(lines[:index] )]
    else:
        UpperCAmelCase__ : Union[str, Any] = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    UpperCAmelCase__ : int = [lines[index]]
    index += 1
    while index < len(lowerCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase__ )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            # A line back at the target indent level closes the current block.
            if len(lowerCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(lowerCAmelCase__ ) )
                if index < len(lowerCAmelCase__ ) - 1:
                    UpperCAmelCase__ : str = [lines[index + 1]]
                    index += 1
                else:
                    UpperCAmelCase__ : Optional[int] = []
            else:
                blocks.append('''\n'''.join(lowerCAmelCase__ ) )
                UpperCAmelCase__ : Dict = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(lowerCAmelCase__ ) > 0:
        blocks.append('''\n'''.join(lowerCAmelCase__ ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lowerCAmelCase__ ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    '''Wrap a key function so sorting ignores case and underscores.'''
    def _inner(lowerCAmelCase__ ):
        return key(lowerCAmelCase__ ).lower().replace('''_''' , '''''' )
    return _inner


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> int:
    '''Sort names: UPPERCASE constants first, then CapitalizedClasses, then
    lowercase functions, each group sorted case/underscore-insensitively.'''
    # If no key is provided, we use a noop.
    def noop(lowerCAmelCase__ ):
        # NOTE(review): returns the unbound name `x`; the identity body was mangled.
        return x
    if key is None:
        UpperCAmelCase__ : Tuple = noop
    # Constants are all uppercase, they go first.
    UpperCAmelCase__ : Optional[Any] = [obj for obj in objects if key(lowerCAmelCase__ ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    UpperCAmelCase__ : int = [obj for obj in objects if key(lowerCAmelCase__ )[0].isupper() and not key(lowerCAmelCase__ ).isupper()]
    # Functions begin with a lowercase, they go last.
    UpperCAmelCase__ : Dict = [obj for obj in objects if not key(lowerCAmelCase__ )[0].isupper()]
    UpperCAmelCase__ : Optional[Any] = ignore_underscore(lowerCAmelCase__ )
    return sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Any:
    '''Sort the names inside one import statement, whether it spans one line,
    exactly three lines, or many lines (one name per line).'''
    # This inner function sort imports between [ ].
    def _replace(lowerCAmelCase__ ):
        UpperCAmelCase__ : Optional[int] = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        UpperCAmelCase__ : Optional[int] = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            UpperCAmelCase__ : Tuple = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCAmelCase__ )] ) + "]"

    UpperCAmelCase__ : Tuple = import_statement.split('''\n''' )
    if len(lowerCAmelCase__ ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        UpperCAmelCase__ : Dict = 2 if lines[1].strip() == '''[''' else 1
        UpperCAmelCase__ : int = [(i, _re_strip_line.search(lowerCAmelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        # NOTE(review): the lambda parameter was renamed but its body still reads `x`.
        UpperCAmelCase__ : List[str] = sort_objects(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[1] )
        UpperCAmelCase__ : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lowerCAmelCase__ ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            UpperCAmelCase__ : Optional[Any] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            UpperCAmelCase__ : Union[str, Any] = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                UpperCAmelCase__ : List[str] = keys[:-1]
            UpperCAmelCase__ : str = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCAmelCase__ )] )
        return "\n".join(lowerCAmelCase__ )
    else:
        # Finally we have to deal with imports fitting on one line
        UpperCAmelCase__ : Any = _re_bracket_content.sub(_replace , lowerCAmelCase__ )
        return import_statement


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=True ) -> Tuple:
    '''Sort the `_import_structure` section of one __init__.py.  With check_only
    it returns True when the file would change; otherwise it rewrites the file.'''
    with open(lowerCAmelCase__ , '''r''' ) as f:
        UpperCAmelCase__ : Dict = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    UpperCAmelCase__ : Tuple = split_code_in_indented_blocks(
        lowerCAmelCase__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(lowerCAmelCase__ ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        UpperCAmelCase__ : List[str] = main_blocks[block_idx]
        UpperCAmelCase__ : Tuple = block.split('''\n''' )
        # Get to the start of the imports.
        UpperCAmelCase__ : List[str] = 0
        while line_idx < len(lowerCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                UpperCAmelCase__ : Dict = len(lowerCAmelCase__ )
            else:
                line_idx += 1
        if line_idx >= len(lowerCAmelCase__ ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        UpperCAmelCase__ : List[Any] = '''\n'''.join(block_lines[line_idx:-1] )
        UpperCAmelCase__ : Tuple = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        UpperCAmelCase__ : List[Any] = split_code_in_indented_blocks(lowerCAmelCase__ , indent_level=lowerCAmelCase__ )
        # We have two categories of import key: list or _import_structure[key].append/extend
        UpperCAmelCase__ : Tuple = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        UpperCAmelCase__ : Tuple = [(pattern.search(lowerCAmelCase__ ).groups()[0] if pattern.search(lowerCAmelCase__ ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        UpperCAmelCase__ : Any = [(i, key) for i, key in enumerate(lowerCAmelCase__ ) if key is not None]
        # NOTE(review): same mangled lambda as in sort_objects_in_import -- reads unbound `x`.
        UpperCAmelCase__ : Dict = [x[0] for x in sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        UpperCAmelCase__ : Tuple = 0
        UpperCAmelCase__ : Tuple = []
        for i in range(len(lowerCAmelCase__ ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                UpperCAmelCase__ : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(lowerCAmelCase__ )
                count += 1
        # And we put our main block back together with its first and last line.
        UpperCAmelCase__ : Union[str, Any] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(lowerCAmelCase__ ):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""" )
            with open(lowerCAmelCase__ , '''w''' ) as f:
                f.write('''\n'''.join(lowerCAmelCase__ ) )


def a__ ( lowerCAmelCase__=True ) -> Optional[Any]:
    '''Walk the package tree, check/fix every __init__.py, and raise listing the
    files that would be overwritten when running in check-only mode.'''
    UpperCAmelCase__ : str = []
    for root, _, files in os.walk(lowerCAmelCase__ ):
        if "__init__.py" in files:
            UpperCAmelCase__ : Optional[int] = sort_imports(os.path.join(lowerCAmelCase__ , '''__init__.py''' ) , check_only=lowerCAmelCase__ )
            if result:
                UpperCAmelCase__ : int = [os.path.join(lowerCAmelCase__ , '''__init__.py''' )]
    if len(lowerCAmelCase__ ) > 0:
        raise ValueError(F"""Would overwrite {len(lowerCAmelCase__ )} files, run `make style`.""" )


if __name__ == "__main__":
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    UpperCamelCase__ = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
75
'''Tests for StableDiffusionInpaintPipeline: fast dummy-component CPU tests plus
slow GPU integration tests against the released SD2 inpainting checkpoint.

NOTE(review): machine-mangled copy -- both test classes are named
``lowerCamelCase_`` (the second shadows the first), every method is named
``lowercase_`` (later defs shadow earlier ones within each class), and repeated
``_A`` parameters make the signatures syntactically invalid.  Code preserved
byte-for-byte; comments record the evident intent only.
'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


# Force deterministic torch ops so image-slice comparisons are reproducible.
enable_full_determinism()


# Fast tests built from tiny randomly-initialised components (runs on CPU).
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
    lowerCAmelCase__ = StableDiffusionInpaintPipeline
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowerCAmelCase__ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowerCAmelCase__ = frozenset([] )

    def lowercase_ ( self : Optional[int] ):
        '''Build the dict of tiny UNet/scheduler/VAE/CLIP components for a dummy pipeline.'''
        torch.manual_seed(0 )
        UpperCAmelCase__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
        UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
        torch.manual_seed(0 )
        UpperCAmelCase__ : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
        UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        UpperCAmelCase__ : str = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
        '''Build deterministic dummy call inputs: a 64x64 init image, a mask image,
        a prompt and a seeded generator.'''
        UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
        UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
        if str(_A ).startswith('''mps''' ):
            # mps does not support device-bound generators.
            UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
        else:
            UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase__ : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowercase_ ( self : Union[str, Any] ):
        '''Run the dummy pipeline and compare a 3x3 corner slice to golden values.'''
        UpperCAmelCase__ : Dict = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase__ : Tuple = self.get_dummy_components()
        UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
        UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
        UpperCAmelCase__ : Any = sd_pipe(**_A ).images
        UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self : Tuple ):
        '''Run the shared batch-consistency check with a loosened tolerance.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


# Slow integration tests: real SD2 inpainting checkpoint on GPU.
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    def lowercase_ ( self : List[Any] ):
        '''Release CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self : Optional[int] ):
        '''fp32 end-to-end inpainting compared against a stored reference image.'''
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : List[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''' )
        UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : str = torch.manual_seed(0 )
        UpperCAmelCase__ : str = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : int = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def lowercase_ ( self : Any ):
        '''fp16 end-to-end inpainting compared against a reference (looser tolerance).'''
        UpperCAmelCase__ : List[str] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
        UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
            _A , torch_dtype=torch.floataa , safety_checker=_A , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : Tuple = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def lowercase_ ( self : Any ):
        '''Peak-memory check: offload + attention slicing must stay under ~2.65 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase__ : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
        UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
        UpperCAmelCase__ : Any = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
        UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.6_5 * 10**9
75
1
'''Binomial distribution: probability mass function for k successes in n trials.'''
from math import factorial


def a__ ( successes , trials , prob ) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    Computes C(trials, successes) * prob**successes * (1-prob)**(trials-successes).

    Args:
        successes: number of successful trials, non-negative int <= trials.
        trials: total number of independent trials, non-negative int.
        prob: per-trial success probability, strictly between 0 and 1.

    Raises:
        ValueError: if successes > trials, either count is negative or not an
            int, or prob is outside the open interval (0, 1).

    Fixes vs. the previous revision (corrupted by an automated rename): the
    signature repeated one parameter name three times (a SyntaxError) while
    the body read the original names restored here, and the isinstance checks
    compared a value against itself.
    """
    if successes > trials:
        raise ValueError('''successes must be lower or equal to trials''' )
    if trials < 0 or successes < 0:
        raise ValueError('''the function is defined for non-negative integers''' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('''the function is defined for non-negative integers''' )
    if not 0 < prob < 1:
        raise ValueError('''prob has to be in range of 1 - 0''' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient


if __name__ == "__main__":
    # Guarded so importing the module has no side effects; the previous
    # unguarded demo also called the stale name `binomial_distribution`.
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    print(a__(2, 4, 0.75))
75
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase__ = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple: if attention_mask is None: UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , 
_A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : str = is_training UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : Union[str, Any] = max_position_embeddings UpperCAmelCase__ : int = eos_token_id UpperCAmelCase__ : Optional[int] = pad_token_id UpperCAmelCase__ : List[str] = bos_token_id UpperCAmelCase__ : Union[str, Any] = initializer_range def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , ) UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A ) return config, inputs_dict def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = 20 UpperCAmelCase__ : int = model_class_name(_A ) UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : str = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Tuple = model.decode( decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , ) UpperCAmelCase__ : int = model.decode(_A , _A ) UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ): '''simple docstring''' 
UpperCAmelCase__ : Tuple = 20 UpperCAmelCase__ : Optional[int] = model_class_name(_A ) UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : int = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Any = model.decode( decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A ) UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class lowerCamelCase_ ( unittest.TestCase ): lowerCAmelCase__ = 9_9 def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 
2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) UpperCAmelCase__ : int = input_ids.shape[0] UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data() UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A ) UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A ) UpperCAmelCase__ : int = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum() UpperCAmelCase__ : Dict = np.equal(_A , 1 
).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_A , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase_ ( __a , unittest.TestCase , __a ): lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_A , _A , _A ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A ) UpperCAmelCase__ : str = model_class(_A ) @jax.jit def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ): return model.encode(input_ids=_A , attention_mask=_A ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def 
lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : List[str] = model_class(_A ) UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) UpperCAmelCase__ : Tuple = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ): return model.decode( decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self : List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id UpperCAmelCase__ : Union[str, Any] = model(_A ) self.assertIsNotNone(_A ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} UpperCAmelCase__ : str = 
FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A ) UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) UpperCAmelCase__ : Optional[Any] = ['''Sam'''] UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' ) UpperCAmelCase__ : List[str] = model.generate(**_A , **_A ) UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.''' UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A ) assert generated_txt[0].strip() == tgt_text
75
1
'''simple docstring'''
import os
from pathlib import Path


def a__():
    """Compile and load the custom MultiScaleDeformableAttention extension.

    Builds the C++/CUDA sources under ``<repo>/kernels/deformable_detr`` with
    torch's JIT cpp-extension builder and returns the resulting module.

    Returns:
        The compiled ``MultiScaleDeformableAttention`` Python module.
    """
    from torch.utils.cpp_extension import load

    # NOTE(review): the original read an undefined name here; the path walk
    # (three parents up, then kernels/deformable_detr) matches resolving
    # relative to this source file, so __file__ is used.
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
75
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam-based builder producing flat string examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam-based builder producing nested (sequence-of-struct) examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    # (key, example) pairs for DummyBeamDataset
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    # (key, example) pairs for NestedBeamDataset
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    """End-to-end tests for beam-based dataset builders on the DirectRunner."""

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # keep a handle on the real writer so the patch can delegate to it
        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # force two output shards
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # NOTE(review): the original checked shard 00000 twice; the second
            # shard is presumably the intended target — confirm against upstream.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # without a beam_runner, preparation must fail fast
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
75
1
'''simple docstring'''
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    """Load an OmegaConf config from `config_path`; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Instantiate a VQModel from config + checkpoint and move it to `device`.

    Falls back to ./model_checkpoints defaults when paths are not given.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning .ckpt files wrap the weights in a "state_dict" entry
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode `x` with the VQGAN and decode it back; returns the reconstruction."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like "pkg.mod.Class" to the named attribute."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    """Instantiate `config["target"]` with `config["params"]` as keyword args."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build a model from `config`, optionally load `sd`, move to GPU / eval mode."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    """Load a model and its global step from `ckpt` (fresh model if `ckpt` is falsy)."""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
75
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """Interactively collect a config object for the selected compute environment."""
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the `accelerate config` parser, or attach it to `subparsers`."""
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the questionnaire and persist the resulting config file (yaml or json)."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # make sure the default cache location exists before writing into it
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
75
1
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that converts Arrow data into (nested) ``torch.Tensor`` objects."""

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # extra kwargs forwarded to every torch.tensor(...) call
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a torch tensor (strings/bytes pass through)."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        # default to int64 / float32 unless the caller overrode the dtype
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
75
'''simple docstring'''
import argparse

import torch

from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into PyTorch weights + config on disk.

    Args:
        gpta_checkpoint_path: path to the TF checkpoint to convert.
        gpta_config_file: optional JSON config path ("" means default config).
        pytorch_dump_folder_path: output directory for the converted files.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    # argparse dests follow the --gpt2_* flag names above
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
75
1
'''simple docstring'''
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return (text, start_index, end_index, lines) for the span between two prompts."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # trim blank lines on both sides of the span
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCase identifier into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a table cell of `width` characters."""
    # the check-mark emojis render 2 characters wide
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the markdown support table (tokenizers / PT / TF / Flax) for all models."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Compare the generated model table to `index.md`; rewrite it if `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
75
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase_ : def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : str = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : List[str] = use_input_mask UpperCAmelCase__ : Tuple = use_token_type_ids 
UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Any = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : List[str] = coordinate_size UpperCAmelCase__ : Tuple = shape_size UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[Any] = num_choices UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase__ : str = text_seq_length UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1 UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCAmelCase__ : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase__ : str = bbox[i, j, 3] UpperCAmelCase__ : Dict = bbox[i, j, 1] UpperCAmelCase__ : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase__ : Optional[int] = bbox[i, j, 2] UpperCAmelCase__ : Any = bbox[i, j, 0] UpperCAmelCase__ : List[Any] = tmp_coordinate UpperCAmelCase__ : str = tf.constant(_A ) UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 
self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Any = None if self.use_input_mask: UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase__ : Any = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : List[str] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A ) # text + image UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A ) UpperCAmelCase__ : Tuple = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , ) UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) # text only UpperCAmelCase__ : Any = model(_A , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.num_labels UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A ) UpperCAmelCase__ : Union[str, Any] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.num_labels UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A ) UpperCAmelCase__ : Optional[int] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : str = 2 UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A ) UpperCAmelCase__ : str = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs UpperCAmelCase__ : List[Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ): '''simple docstring''' return True def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(_A ) if model_class in get_values(_A ): UpperCAmelCase__ : Tuple = { k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_A , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
elif model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = model_class(_A ) if getattr(_A , '''hf_compute_loss''' , _A ): # The number of elements in the loss should be the same as the number of elements in the label UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : List[Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0] ] UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) UpperCAmelCase__ : List[Any] = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy() if 
len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCAmelCase__ : Any = -100 UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A ) UpperCAmelCase__ : int = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Dict = model(_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) # Get keys that were added with the _prepare_for_class function UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys() UpperCAmelCase__ : int = inspect.signature(model.call ).parameters UpperCAmelCase__ : Union[str, Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCAmelCase__ : Dict = {0: '''input_ids'''} for label_key in label_keys: UpperCAmelCase__ : str = signature_names.index(_A ) UpperCAmelCase__ : List[Any] = label_key UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCAmelCase__ : Tuple = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCAmelCase__ : Any = prepared_for_class[value] UpperCAmelCase__ : Tuple = tuple(_A ) # Send to model UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase_ ( self : int ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ 
) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Union[str, Any] = type self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : List[str] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _A , _A , _A , _A , _A , _A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple 
docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> List[str]: UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Dict ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) UpperCAmelCase__ : Dict = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values UpperCAmelCase__ : str = tf.constant([[1, 2]] ) UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A ) # verify the logits UpperCAmelCase__ : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , _A ) UpperCAmelCase__ : Dict = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
75
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCamelCase__ = logging.get_logger(__name__) def a__ ( lowerCAmelCase__ ) -> List[List[ImageInput]]: if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase__ ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['pixel_values'] def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Dict , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : List[Any] = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase__ : Tuple = get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase__ : Any = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : Dict = do_resize 
UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : int = do_center_crop UpperCAmelCase__ : List[Any] = crop_size UpperCAmelCase__ : Any = resample UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Tuple = rescale_factor UpperCAmelCase__ : Union[str, Any] = offset UpperCAmelCase__ : List[str] = do_normalize UpperCAmelCase__ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" in size: UpperCAmelCase__ : List[str] = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A ) elif "height" in size and "width" in size: UpperCAmelCase__ : str = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ): '''simple docstring''' UpperCAmelCase__ : Dict = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Union[int, float] , _A : bool = True , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ): '''simple docstring''' UpperCAmelCase__ : int = image.astype(np.floataa ) if offset: UpperCAmelCase__ : str = image - (scale / 2) return rescale(_A , scale=_A , data_format=_A , **_A ) def lowercase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def lowercase_ ( self : str , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase__ : Optional[Any] = to_numpy_array(_A ) if do_resize: UpperCAmelCase__ : List[Any] = self.resize(image=_A , size=_A , resample=_A ) if do_center_crop: UpperCAmelCase__ : Union[str, Any] = self.center_crop(_A , size=_A ) if do_rescale: UpperCAmelCase__ : int = self.rescale(image=_A , scale=_A , offset=_A ) if do_normalize: UpperCAmelCase__ : Tuple = self.normalize(image=_A , mean=_A , std=_A ) UpperCAmelCase__ : Union[str, Any] = to_channel_dimension_format(_A , _A ) return image def lowercase_ ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Any , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : List[str] = resample if resample is not None else self.resample UpperCAmelCase__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : Tuple = offset if offset is not None else self.offset UpperCAmelCase__ : Any = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Any = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : Dict = image_std if image_std is not None else self.image_std UpperCAmelCase__ : str = size if size is not None else self.size UpperCAmelCase__ : Dict = get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Optional[int] = crop_size if crop_size is not None else 
self.crop_size UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase__ : Optional[int] = make_batched(_A ) UpperCAmelCase__ : List[str] = [ [ self._preprocess_image( image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , offset=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , ) for img in video ] for video in videos ] UpperCAmelCase__ : int = {'''pixel_values''': videos} return BatchFeature(data=_A , tensor_type=_A )
75
'''simple docstring''' import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100""" UpperCamelCase__ = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: UpperCamelCase__ = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: UpperCamelCase__ = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
75
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'torchsde'] def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] )
75
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray: UpperCAmelCase__ : List[str] = cva.getAffineTransform(lowerCAmelCase__ , lowerCAmelCase__ ) return cva.warpAffine(lowerCAmelCase__ , lowerCAmelCase__ , (rows, cols) ) if __name__ == "__main__": # read original image UpperCamelCase__ = cva.imread( str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''') ) # turn image in gray scale value UpperCamelCase__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape UpperCamelCase__ , UpperCamelCase__ = gray_img.shape # set different points to rotate image UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa) UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa) UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa) UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa) # add all rotated images in a list UpperCamelCase__ = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations UpperCamelCase__ = plt.figure(1) UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3'''] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''') plt.title(titles[i]) plt.axis('''off''') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
75
1
'''simple docstring''' import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def a__ ( lowerCAmelCase__ ) -> int: monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() ) @pytest.fixture def a__ ( lowerCAmelCase__ ) -> Dict: class lowerCamelCase_ : def __init__( self : List[str] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = metric_id class lowerCamelCase_ : lowerCAmelCase__ = [MetricMock(__a ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']] def lowercase_ ( self : List[Any] ): '''simple docstring''' return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() ) @pytest.mark.parametrize( '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: if "tmp_path" in args: UpperCAmelCase__ : Any = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(lowerCAmelCase__ , match='''https://huggingface.co/docs/evaluate''' ): func(*lowerCAmelCase__ )
75
'''simple docstring''' from datetime import datetime as dt import os from github import Github UpperCamelCase__ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def a__ ( ) -> List[str]: UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] ) UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' ) UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' ) for issue in open_issues: UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
75
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig UpperCamelCase__ = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'albert' def __init__( self : Any , _A : List[Any]=30_000 , _A : Any=128 , _A : Optional[int]=4_096 , _A : Union[str, Any]=12 , _A : List[Any]=1 , _A : Optional[int]=64 , _A : str=16_384 , _A : Tuple=1 , _A : Any="gelu_new" , _A : Dict=0 , _A : Optional[int]=0 , _A : Dict=512 , _A : str=2 , _A : List[str]=0.0_2 , _A : List[Any]=1e-12 , _A : List[Any]=0.1 , _A : List[str]="absolute" , _A : Optional[Any]=0 , _A : int=2 , _A : Any=3 , **_A : List[Any] , ): '''simple docstring''' super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : List[str] = embedding_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : Dict = num_hidden_groups UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : str = inner_group_num UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : Dict = intermediate_size UpperCAmelCase__ : Optional[int] = 
hidden_dropout_prob UpperCAmelCase__ : int = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : List[Any] = type_vocab_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : Union[str, Any] = layer_norm_eps UpperCAmelCase__ : Tuple = classifier_dropout_prob UpperCAmelCase__ : Optional[int] = position_embedding_type class lowerCamelCase_ ( __a ): @property def lowercase_ ( self : Any ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase__ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
75
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase_ ( __a ): def __init__( self : Dict , _A : List[str] , _A : int ): '''simple docstring''' super().__init__() self.register_modules(unet=_A , scheduler=_A ) @torch.no_grad() def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCAmelCase__ : List[Any] = int(_A ) if sample_size % down_scale_factor != 0: UpperCAmelCase__ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. 
It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" ''' process.''' ) UpperCAmelCase__ : Dict = int(_A ) UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(_A , _A ) and len(_A ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A ) # set step values self.scheduler.set_timesteps(_A , device=audio.device ) UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample # 2. compute previous image: x_t -> t_t-1 UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCAmelCase__ : Any = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_A )
75
1
"""Breadth-first search over an adjacency-list graph, with path recovery."""

from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    """BFS helper: builds a breadth-first tree and reads shortest paths off it."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Store the adjacency list and the vertex BFS starts from."""
        # NOTE(review): the mangled original dropped `self.` on these
        # assignments and renamed the methods, breaking every call site;
        # restored here.
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run BFS from the source, recording each visited vertex's tree parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path 'source->...->target' from the BFS tree.

        Raises:
            ValueError: if the target was never reached by BFS.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
        # Walk up the parent links recursively, appending as we unwind.
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
75
"""Binomial distribution probability mass function."""

from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    Args:
        successes: number of successful trials (non-negative int <= trials).
        trials: total number of independent trials (non-negative int).
        prob: per-trial success probability, strictly between 0 and 1.

    Raises:
        ValueError: on any out-of-range or non-integer input.
    """
    # NOTE(review): the mangled original named this `a__` while __main__
    # called `binomial_distribution`; name restored to match the call site.
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
75
1
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False, False, False @dataclass class lowerCamelCase_ : lowerCAmelCase__ = None lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = None # Automatically constructed lowerCAmelCase__ = "dict" lowerCAmelCase__ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) lowerCAmelCase__ = field(default='Audio' , init=__a , repr=__a ) def __call__( self : Optional[int] ): '''simple docstring''' return self.pa_type def lowercase_ ( self : Dict , _A : Union[str, bytes, dict] ): '''simple docstring''' try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. 
except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(_A , _A ): return {"bytes": None, "path": value} elif isinstance(_A , _A ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase__ : Optional[int] = BytesIO() sf.write(_A , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) 
UpperCAmelCase__ : Dict = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 32_767 else: UpperCAmelCase__ : Optional[int] = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 32_767 UpperCAmelCase__ : Tuple = BytesIO(bytes() ) sf.write(_A , _A , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def lowercase_ ( self : Tuple , _A : dict , _A : Optional[Dict[str, Union[str, bool, None]]] = None ): '''simple docstring''' if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err UpperCAmelCase__ : Optional[Any] = xsplitext(_A )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. 
''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: UpperCAmelCase__ : Tuple = token_per_repo_id or {} UpperCAmelCase__ : Optional[Any] = path.split('''::''' )[-1] try: UpperCAmelCase__ : int = string_to_dict(_A , config.HUB_DATASETS_URL )['''repo_id'''] UpperCAmelCase__ : Union[str, Any] = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase__ : List[str] = None with xopen(_A , '''rb''' , use_auth_token=_A ) as f: UpperCAmelCase__ , UpperCAmelCase__ : str = sf.read(_A ) else: UpperCAmelCase__ , UpperCAmelCase__ : str = sf.read(_A ) UpperCAmelCase__ : int = array.T if self.mono: UpperCAmelCase__ : str = librosa.to_mono(_A ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase__ : Optional[Any] = librosa.resample(_A , orig_sr=_A , target_sr=self.sampling_rate ) UpperCAmelCase__ : int = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def lowercase_ ( self : Dict ): '''simple docstring''' from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def lowercase_ ( self : str , _A : Union[pa.StringArray, pa.StructArray] ): '''simple docstring''' if pa.types.is_string(storage.type ): UpperCAmelCase__ : Optional[Any] = pa.array([None] * len(_A ) , type=pa.binary() ) UpperCAmelCase__ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase__ : Optional[int] = pa.array([None] * len(_A ) , type=pa.string() ) UpperCAmelCase__ : int = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) 
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): UpperCAmelCase__ : List[Any] = pa.array([Audio().encode_example(_A ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: UpperCAmelCase__ : Optional[Any] = storage.field('''bytes''' ) else: UpperCAmelCase__ : Any = pa.array([None] * len(_A ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: UpperCAmelCase__ : List[Any] = storage.field('''path''' ) else: UpperCAmelCase__ : List[str] = pa.array([None] * len(_A ) , type=pa.string() ) UpperCAmelCase__ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(_A , self.pa_type ) def lowercase_ ( self : Tuple , _A : pa.StructArray ): '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(_A : Optional[Any] ): with xopen(_A , '''rb''' ) as f: UpperCAmelCase__ : List[str] = f.read() return bytes_ UpperCAmelCase__ : Any = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCAmelCase__ : Tuple = pa.array( [os.path.basename(_A ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) UpperCAmelCase__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(_A , self.pa_type )
75
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['pixel_values'] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224} UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : List[Any] = size UpperCAmelCase__ : int = resample UpperCAmelCase__ : int = do_center_crop UpperCAmelCase__ : List[str] = crop_size UpperCAmelCase__ : Union[str, Any] = do_rescale UpperCAmelCase__ : Optional[int] = rescale_factor UpperCAmelCase__ : List[Any] = do_normalize UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowercase_ ( self : str , _A : 
np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] ) UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( _A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ): '''simple docstring''' return rescale(_A , scale=_A , data_format=_A , **_A ) def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ): '''simple docstring''' UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : Tuple = size if size is not None else self.size UpperCAmelCase__ : int = 
get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images] if do_resize: UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images] if do_center_crop: UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images] if do_rescale: UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images] if do_normalize: UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images] UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] UpperCAmelCase__ : Dict = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
75
1
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase_ ( unittest.TestCase ): lowerCAmelCase__ = JukeboxTokenizer lowerCAmelCase__ = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def lowercase_ ( self : Optional[Any] ): '''simple docstring''' import torch UpperCAmelCase__ : Optional[int] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) UpperCAmelCase__ : List[Any] = tokenizer(**self.metas )['''input_ids'''] # fmt: off UpperCAmelCase__ : List[str] = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 
45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 
27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1_069, 11]] ), torch.tensor([[0, 0, 0, 1_069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def lowercase_ ( self : Dict ): '''simple docstring''' import torch UpperCAmelCase__ : Union[str, Any] = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) UpperCAmelCase__ : Optional[Any] = tokenizer(**self.metas )['''input_ids'''] # fmt: off UpperCAmelCase__ : str = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 
31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ), ] # fmt: 
on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
75
"""Columnar transposition cipher with an interactive driver."""

import math


def main() -> None:
    """Prompt for message/key/mode, then print the transformed text."""
    # NOTE(review): the mangled original defined all three functions as
    # `a__` (each def shadowing the last) while calling
    # encrypt_message/decrypt_message here; names restored.
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Encrypt `message` by reading it column-wise with `key` columns.

    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        # Collect every key-th character starting at this column.
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message for the same `key`.

    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    # Cells of the grid that hold no character (bottom-right corner).
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap at the end of a row, or one column early once we reach the
        # rows whose last cell is a shaded (unused) box.
        if (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes):
            col = 0
            row += 1
    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
75
1
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 UpperCamelCase__ = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 2048-bit 1_4: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 3072-bit 1_5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + 
'''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 4096-bit 1_6: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 6144-bit 1_7: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + 
'''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 8192-bit 1_8: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + 
'''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + 
'''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, } class lowerCamelCase_ : def __init__( self : Optional[int] , _A : int = 14 ): '''simple docstring''' if group not in primes: raise ValueError('''Unsupported Group''' ) UpperCAmelCase__ : int = primes[group]['''prime'''] UpperCAmelCase__ : Optional[Any] = primes[group]['''generator'''] UpperCAmelCase__ : str = int(hexlify(urandom(32 ) ) , base=16 ) def lowercase_ ( self : List[str] ): '''simple docstring''' return hex(self.__private_key )[2:] def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = pow(self.generator , self.__private_key , self.prime ) return hex(_A )[2:] def lowercase_ ( self : Optional[Any] , _A : int ): '''simple docstring''' return ( 2 <= key <= self.prime - 2 and pow(_A , (self.prime - 1) // 2 , self.prime ) == 1 ) def lowercase_ ( self : List[Any] , _A : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = int(_A , base=16 ) if not self.is_valid_public_key(_A ): raise ValueError('''Invalid public key''' ) UpperCAmelCase__ : Union[str, Any] = pow(_A , self.__private_key , self.prime ) return shaaaa(str(_A ).encode() ).hexdigest() @staticmethod def lowercase_ ( _A : int , _A : int ): '''simple docstring''' return ( 2 <= remote_public_key_str <= prime - 2 and pow(_A , (prime - 1) // 2 , _A ) == 1 ) @staticmethod def lowercase_ ( _A : str , _A : str , _A : int = 14 ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = int(_A , base=16 ) UpperCAmelCase__ : List[Any] = int(_A , base=16 ) UpperCAmelCase__ : List[Any] = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(_A , _A ): raise ValueError('''Invalid public key''' ) UpperCAmelCase__ : List[Any] = pow(_A , _A , _A ) return shaaaa(str(_A ).encode() ).hexdigest() if 
__name__ == "__main__": import doctest doctest.testmod()
75
"""Min-heap of `Node(name, val)` records with an index map for O(log n) decrease-key.

NOTE(review): an obfuscation pass has mangled this snippet. As written it cannot
run: several `def`s repeat the parameter `_A` (SyntaxError), every helper method
is named `lowercase_` (each def overrides the previous, so only the last
survives), and assignments to the local `UpperCAmelCase__` replaced what were
clearly `self.<attr>` / local-variable bindings, so `self.heap`,
`self.heap_dict`, `self.idx_of_element` are never set. The intent documented
below is reconstructed from the surviving expressions — confirm against the
upstream original before relying on it.
"""
class lowerCamelCase_ :
    # Heap entry: a (name, val) pair ordered by `val`.
    def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ):
        # NOTE(review): duplicate parameter name `_A` is a SyntaxError; the body
        # still references the original parameter names `name` and `val`.
        '''Store the node's label and its comparison value.'''
        UpperCAmelCase__ : Optional[Any] = name
        UpperCAmelCase__ : Union[str, Any] = val

    def __str__( self : Tuple ):
        '''Return e.g. ``Node(B, 6)`` for debugging/printing.'''
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__( self : Union[str, Any] , _A : Dict ):
        # NOTE(review): body uses `other`, not the declared `_A`.
        '''Order nodes by their numeric ``val``.'''
        return self.val < other.val


class lowerCamelCase_ :
    # NOTE(review): this second `lowerCamelCase_` shadows the Node class above.
    # It is a binary min-heap backed by a list plus two dicts:
    # heap_dict (node -> val) and idx_of_element (node -> position in heap).
    def __init__( self : int , _A : List[Any] ):
        '''Build the heap in-place from an iterable of nodes.'''
        UpperCAmelCase__ : Tuple = {}
        UpperCAmelCase__ : int = {}
        UpperCAmelCase__ : Any = self.build_heap(_A )

    def __getitem__( self : Any , _A : Any ):
        '''Dictionary-style access: ``heap[node]`` returns the stored value.'''
        return self.get_value(_A )

    def lowercase_ ( self : Any , _A : List[Any] ):
        '''Index of the parent of the node at ``idx`` in the array layout.'''
        return (idx - 1) // 2

    def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ):
        '''Index of the left child of the node at ``idx``.'''
        return idx * 2 + 1

    def lowercase_ ( self : Tuple , _A : List[Any] ):
        '''Index of the right child of the node at ``idx``.'''
        return idx * 2 + 2

    def lowercase_ ( self : List[str] , _A : Tuple ):
        '''Look up the value currently associated with ``key``.'''
        return self.heap_dict[key]

    def lowercase_ ( self : str , _A : List[Any] ):
        '''Heapify ``array`` bottom-up: record positions, then sift down from the
        last internal node to the root (classic O(n) build-heap).'''
        UpperCAmelCase__ : Any = len(_A ) - 1
        UpperCAmelCase__ : Tuple = self.get_parent_idx(_A )
        for idx, i in enumerate(_A ):
            UpperCAmelCase__ : Dict = idx
            UpperCAmelCase__ : Optional[Any] = i.val
        for i in range(_A , -1 , -1 ):
            self.sift_down(_A , _A )
        return array

    def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ):
        '''Sift the node at ``idx`` down until both children are >= it,
        keeping ``idx_of_element`` in sync with every swap.'''
        while True:
            UpperCAmelCase__ : Any = self.get_left_child_idx(_A )  # noqa: E741
            UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A )
            UpperCAmelCase__ : Tuple = idx
            if l < len(_A ) and array[l] < array[idx]:
                UpperCAmelCase__ : int = l
            if r < len(_A ) and array[r] < array[smallest]:
                UpperCAmelCase__ : Dict = r
            if smallest != idx:
                # Swap the two entries and their recorded positions.
                UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx]
                (
                    (
                        UpperCAmelCase__
                    ) ,
                    (
                        UpperCAmelCase__
                    ) ,
                ) : List[str] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                UpperCAmelCase__ : str = smallest
            else:
                break

    def lowercase_ ( self : List[str] , _A : int ):
        '''Sift the node at ``idx`` up while it is smaller than its parent.'''
        UpperCAmelCase__ : str = self.get_parent_idx(_A )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p]
            UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            UpperCAmelCase__ : Union[str, Any] = p
            UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A )

    def lowercase_ ( self : Optional[int] ):
        '''Return (without removing) the minimum element, i.e. the root.'''
        return self.heap[0]

    def lowercase_ ( self : Dict ):
        '''Pop the minimum: swap root with last element, drop it, re-sift root.'''
        UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0]
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        UpperCAmelCase__ : int = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x

    def lowercase_ ( self : int , _A : Union[str, Any] ):
        '''Append a node and restore the heap property by sifting it up.'''
        self.heap.append(_A )
        UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1
        UpperCAmelCase__ : Optional[Any] = node.val
        self.sift_up(len(self.heap ) - 1 )

    def lowercase_ ( self : str ):
        '''True when the heap holds no elements.'''
        return len(self.heap ) == 0

    def lowercase_ ( self : int , _A : Optional[Any] , _A : str ):
        '''Lower an existing node's value and sift it up to its new position.
        The new value must be strictly smaller than the current one.'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        UpperCAmelCase__ : Optional[Any] = new_value
        UpperCAmelCase__ : List[str] = new_value
        self.sift_up(self.idx_of_element[node] )


# Demo driver.
# NOTE(review): `Node`, `MinHeap`, `my_min_heap` and `decrease_key` are never
# defined under those names in this snippet (the classes above are both named
# `lowerCamelCase_` and each instance is bound to `UpperCamelCase__`), so this
# script raises NameError as written.
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
    print(i)

print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
1
"""Test fixtures for accelerate: a synthetic linear-regression dataset, two tiny
regression modules, and an MRPC dataloader builder.

NOTE(review): identifier mangling has broken this snippet as presented —
several signatures repeat a parameter name (SyntaxError), and bodies reference
the original names (`rng`, `length`, `a`, `b`, `x`, `i`, `tokenizer`,
`examples`, `accelerator`, ...) that the mangled signatures no longer bind.
Documentation below describes the evident intent; confirm against upstream.
"""
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class lowerCamelCase_ :
    # Synthetic dataset: y = a*x + b + gaussian noise, float32 throughout.
    def __init__( self : Dict , _A : str=2 , _A : Tuple=3 , _A : Tuple=64 , _A : str=None ):
        # NOTE(review): four parameters all named `_A` — SyntaxError; originally
        # (a=2, b=3, length=64, seed=None) judging by the body.
        '''Generate `length` noisy samples of the line a*x + b.'''
        UpperCAmelCase__ : int = np.random.default_rng(_A )
        UpperCAmelCase__ : Any = length
        UpperCAmelCase__ : int = rng.normal(size=(length,) ).astype(np.floataa )
        UpperCAmelCase__ : Optional[Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )

    def __len__( self : Tuple ):
        '''Number of samples generated at construction time.'''
        return self.length

    def __getitem__( self : Union[str, Any] , _A : Optional[Any] ):
        '''Return one sample as a {"x": ..., "y": ...} mapping.'''
        return {"x": self.x[i], "y": self.y[i]}


class lowerCamelCase_ ( torch.nn.Module ):
    # Regression module with 2-element parameters hard-coded to [2, 3];
    # the forward pass uses only element 0 of each.
    def __init__( self : str , _A : List[str]=0 , _A : List[Any]=0 , _A : int=False ):
        '''Create fixed parameters a=b=[2, 3] and arm the first-batch dtype log.'''
        super().__init__()
        UpperCAmelCase__ : Dict = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        UpperCAmelCase__ : Dict = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        UpperCAmelCase__ : List[Any] = True

    def lowercase_ ( self : Any , _A : List[Any]=None ):
        '''Forward pass: log dtypes once, then return x * a[0] + b[0].'''
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            UpperCAmelCase__ : Any = False
        return x * self.a[0] + self.b[0]


class lowerCamelCase_ ( torch.nn.Module ):
    # NOTE(review): shadows the class above. Same shape of model, but the
    # parameters are built from the constructor arguments instead of [2, 3].
    def __init__( self : Optional[int] , _A : List[str]=0 , _A : Optional[int]=0 , _A : Any=False ):
        '''Create scalar parameters a and b from the given initial values.'''
        super().__init__()
        UpperCAmelCase__ : Dict = torch.nn.Parameter(torch.tensor(_A ).float() )
        UpperCAmelCase__ : Optional[Any] = torch.nn.Parameter(torch.tensor(_A ).float() )
        UpperCAmelCase__ : str = True

    def lowercase_ ( self : str , _A : List[Any]=None ):
        '''Forward pass: log dtypes once, then return x * a + b.'''
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            UpperCAmelCase__ : int = False
        return x * self.a + self.b


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict:
    '''Build (train, eval) DataLoaders over the bundled MRPC CSV samples,
    tokenized with bert-base-cased; pads to max_length on TPU, longest otherwise.

    NOTE(review): both parameters share the name `lowerCAmelCase__` (SyntaxError);
    originally (accelerator, batch_size=16).
    '''
    from datasets import load_dataset
    from transformers import AutoTokenizer

    UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    UpperCAmelCase__ : Dict = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    UpperCAmelCase__ : str = load_dataset('''csv''' , data_files=lowerCAmelCase__ )
    UpperCAmelCase__ : Dict = datasets['''train'''].unique('''label''' )
    UpperCAmelCase__ : Union[str, Any] = {v: i for i, v in enumerate(lowerCAmelCase__ )}

    def tokenize_function(lowerCAmelCase__ ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase__ : Any = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' )
        if "label" in examples:
            UpperCAmelCase__ : Union[str, Any] = [label_to_id[l] for l in examples['''label''']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    UpperCAmelCase__ : Optional[int] = datasets.map(
        lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )

    def collate_fn(lowerCAmelCase__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
        return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    UpperCAmelCase__ : Optional[Any] = DataLoader(tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 )
    UpperCAmelCase__ : Tuple = DataLoader(tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 )
    return train_dataloader, eval_dataloader
75
"""Repo-hygiene script: verify every transformers config class docstring links a
valid Hugging Face checkpoint.

NOTE(review): identifier mangling has broken this snippet as presented — every
module constant is bound to the same name `UpperCamelCase__` (each assignment
discards the previous one), and the bodies still reference the original names
(`PATH_TO_TRANSFORMERS`, `transformers`, `_re_checkpoint`, `CONFIG_MAPPING`,
`CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK`), none of which exist
here → NameError at import/run time.
"""
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)

UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

# Config classes that are allowed to have no checkpoint in their docstring.
UpperCamelCase__ = {
    '''DecisionTransformerConfig''',
    '''EncoderDecoderConfig''',
    '''MusicgenConfig''',
    '''RagConfig''',
    '''SpeechEncoderDecoderConfig''',
    '''TimmBackboneConfig''',
    '''VisionEncoderDecoderConfig''',
    '''VisionTextDualEncoderConfig''',
    '''LlamaConfig''',
}


def a__ ( lowerCAmelCase__ ) -> List[str]:
    '''Return the first checkpoint name in `config_class`'s docstring whose
    markdown link target matches https://huggingface.co/<name>, else None.'''
    UpperCAmelCase__ : str = None

    # source code of `config_class`
    UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ )
    UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ )

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            UpperCAmelCase__ : List[str] = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            UpperCAmelCase__ : Any = ckpt_name
            break

    return checkpoint


def a__ ( ) -> Dict:
    '''Raise ValueError listing every non-deprecated, non-ignored config class
    whose docstring lacks a valid checkpoint link.

    NOTE(review): this def shadows the `a__` above (both share the mangled
    name), yet the body calls `get_checkpoint_from_config_class` — the original
    name of the shadowed function — which is undefined here.
    '''
    UpperCAmelCase__ : Optional[Any] = []

    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ )

        UpperCAmelCase__ : Optional[int] = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(lowerCAmelCase__ )

    if len(lowerCAmelCase__ ) > 0:
        UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) )
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )


if __name__ == "__main__":
    # NOTE(review): `check_config_docstrings_have_checkpoints` is never defined
    # under that name in this snippet (it was renamed to `a__`) → NameError.
    check_config_docstrings_have_checkpoints()
75
1
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCamelCase__ = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize UpperCamelCase__ = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' UpperCamelCase__ = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' UpperCamelCase__ = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. 
Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] , ) def lowercase_ ( self : str , _A : Optional[Any] ): '''simple docstring''' import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def lowercase_ ( self : str , _A : Dict , _A : Optional[Any] , _A : Tuple=0.9 , _A : Optional[int]=3 , _A : 
Tuple=0.5 ): '''simple docstring''' if NLTK_VERSION >= version.Version('''3.6.5''' ): UpperCAmelCase__ : Union[str, Any] = [ meteor_score.single_meteor_score( word_tokenize(_A ) , word_tokenize(_A ) , alpha=_A , beta=_A , gamma=_A ) for ref, pred in zip(_A , _A ) ] else: UpperCAmelCase__ : Tuple = [ meteor_score.single_meteor_score(_A , _A , alpha=_A , beta=_A , gamma=_A ) for ref, pred in zip(_A , _A ) ] return {"meteor": np.mean(_A )}
75
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'torchsde'] def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] )
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ = { '''configuration_mobilenet_v2''': [ '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileNetV2Config''', '''MobileNetV2OnnxConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''MobileNetV2FeatureExtractor'''] UpperCamelCase__ = ['''MobileNetV2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileNetV2ForImageClassification''', '''MobileNetV2ForSemanticSegmentation''', '''MobileNetV2Model''', '''MobileNetV2PreTrainedModel''', '''load_tf_weights_in_mobilenet_v2''', ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'ctrl' lowerCAmelCase__ = ['past_key_values'] lowerCAmelCase__ = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Any = n_positions UpperCAmelCase__ : Optional[Any] = n_embd UpperCAmelCase__ : List[str] = n_layer UpperCAmelCase__ : Any = n_head UpperCAmelCase__ : int = dff UpperCAmelCase__ : str = resid_pdrop UpperCAmelCase__ : Tuple = embd_pdrop UpperCAmelCase__ : int = layer_norm_epsilon UpperCAmelCase__ : Tuple = initializer_range UpperCAmelCase__ : Union[str, Any] = use_cache super().__init__(**_A )
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[str] ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def lowercase_ ( self : List[Any] , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script 
, source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , ) def lowercase_ ( self : Optional[int] , _A : Any ): '''simple docstring''' TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def lowercase_ ( self : Optional[int] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A ) # run training estimator.fit() # result dataframe UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase__ : Any = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
75
1
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py UpperCamelCase__ = '''src/transformers''' UpperCamelCase__ = '''docs/source/en/tasks''' def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase__ : int = f.readlines() # Find the start prompt. UpperCAmelCase__ : Optional[Any] = 0 while not lines[start_index].startswith(lowerCAmelCase__ ): start_index += 1 start_index += 1 UpperCAmelCase__ : Tuple = start_index while not lines[end_index].startswith(lowerCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. 
UpperCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH) UpperCamelCase__ = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list 
contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). UpperCamelCase__ = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def a__ ( lowerCAmelCase__ ) -> Optional[Any]: UpperCAmelCase__ : int = TASK_GUIDE_TO_MODELS[task_guide] UpperCAmelCase__ : List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowerCAmelCase__ , set() ) UpperCAmelCase__ : List[Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[Any]: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = _find_text_in_file( filename=os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) UpperCAmelCase__ : Any = get_model_list_for_task(lowerCAmelCase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ''' to fix this.''' ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') UpperCamelCase__ = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
75
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets UpperCamelCase__ = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' UpperCamelCase__ = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' UpperCamelCase__ = R''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , ) def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 0.0 for i, j in zip(_A , _A ): n_correct += 1.0 if 
math_equivalence.is_equiv(_A , _A ) else 0.0 UpperCAmelCase__ : Dict = n_correct / len(_A ) return { "accuracy": accuracy, }
75
1
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ = logging.get_logger(__name__) # General docstring UpperCamelCase__ = '''RegNetConfig''' # Base docstring UpperCamelCase__ = '''facebook/regnet-y-040''' UpperCamelCase__ = [1, 1_0_8_8, 7, 7] # Image classification docstring UpperCamelCase__ = '''facebook/regnet-y-040''' UpperCamelCase__ = '''tabby, tabby cat''' UpperCamelCase__ = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( nn.Module ): def __init__( self : Optional[Any] , _A : int , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[Any] = nn.Convad( _A , _A , kernel_size=_A , stride=_A , padding=kernel_size // 2 , groups=_A , bias=_A , ) UpperCAmelCase__ : str = nn.BatchNormad(_A ) UpperCAmelCase__ : str = ACTaFN[activation] if activation is not None else nn.Identity() def lowercase_ ( self : str , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.convolution(_A ) UpperCAmelCase__ : str = self.normalization(_A ) UpperCAmelCase__ : Optional[int] = self.activation(_A ) return hidden_state class lowerCamelCase_ ( nn.Module ): def __init__( self : Tuple , _A : RegNetConfig ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[Any] = RegNetConvLayer( config.num_channels , 
config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) UpperCAmelCase__ : List[str] = config.num_channels def lowercase_ ( self : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) UpperCAmelCase__ : Tuple = self.embedder(_A ) return hidden_state class lowerCamelCase_ ( nn.Module ): def __init__( self : str , _A : int , _A : int , _A : int = 2 ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[str] = nn.Convad(_A , _A , kernel_size=1 , stride=_A , bias=_A ) UpperCAmelCase__ : Optional[int] = nn.BatchNormad(_A ) def lowercase_ ( self : int , _A : Tensor ): '''simple docstring''' UpperCAmelCase__ : int = self.convolution(_A ) UpperCAmelCase__ : List[Any] = self.normalization(_A ) return hidden_state class lowerCamelCase_ ( nn.Module ): def __init__( self : List[str] , _A : int , _A : int ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[str] = nn.AdaptiveAvgPoolad((1, 1) ) UpperCAmelCase__ : Dict = nn.Sequential( nn.Convad(_A , _A , kernel_size=1 ) , nn.ReLU() , nn.Convad(_A , _A , kernel_size=1 ) , nn.Sigmoid() , ) def lowercase_ ( self : List[str] , _A : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.pooler(_A ) UpperCAmelCase__ : Optional[Any] = self.attention(_A ) UpperCAmelCase__ : int = hidden_state * attention return hidden_state class lowerCamelCase_ ( nn.Module ): def __init__( self : int , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 ): '''simple docstring''' super().__init__() UpperCAmelCase__ : str = in_channels != out_channels or stride != 1 UpperCAmelCase__ : Optional[Any] = max(1 , out_channels // config.groups_width ) UpperCAmelCase__ : List[str] = ( RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase__ : Union[str, 
Any] = nn.Sequential( RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , ) UpperCAmelCase__ : Optional[int] = ACTaFN[config.hidden_act] def lowercase_ ( self : List[str] , _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = hidden_state UpperCAmelCase__ : List[Any] = self.layer(_A ) UpperCAmelCase__ : List[Any] = self.shortcut(_A ) hidden_state += residual UpperCAmelCase__ : str = self.activation(_A ) return hidden_state class lowerCamelCase_ ( nn.Module ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[int] = in_channels != out_channels or stride != 1 UpperCAmelCase__ : Dict = max(1 , out_channels // config.groups_width ) UpperCAmelCase__ : Union[str, Any] = ( RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase__ : List[str] = nn.Sequential( RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , ) UpperCAmelCase__ : Tuple = ACTaFN[config.hidden_act] def lowercase_ ( self : Dict , _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = hidden_state UpperCAmelCase__ : List[str] = self.layer(_A ) UpperCAmelCase__ : Tuple = self.shortcut(_A ) hidden_state += residual UpperCAmelCase__ : Union[str, Any] = self.activation(_A ) return hidden_state class lowerCamelCase_ ( nn.Module ): def __init__( self : Optional[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Union[str, Any] = 
RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer UpperCAmelCase__ : str = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _A , _A , _A , stride=_A , ) , *[layer(_A , _A , _A ) for _ in range(depth - 1 )] , ) def lowercase_ ( self : Tuple , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.layers(_A ) return hidden_state class lowerCamelCase_ ( nn.Module ): def __init__( self : str , _A : RegNetConfig ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Tuple = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) UpperCAmelCase__ : Any = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_A , config.depths[1:] ): self.stages.append(RegNetStage(_A , _A , _A , depth=_A ) ) def lowercase_ ( self : Optional[int] , _A : Tensor , _A : bool = False , _A : bool = True ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCAmelCase__ : Dict = hidden_states + (hidden_state,) UpperCAmelCase__ : str = stage_module(_A ) if output_hidden_states: UpperCAmelCase__ : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = RegNetConfig lowerCAmelCase__ = 'regnet' lowerCAmelCase__ = 'pixel_values' lowerCAmelCase__ = True def lowercase_ ( self : Optional[Any] , _A : Optional[Any] ): '''simple docstring''' if isinstance(_A , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' 
, nonlinearity='''relu''' ) elif isinstance(_A , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowercase_ ( self : Union[str, Any] , _A : int , _A : Dict=False ): '''simple docstring''' if isinstance(_A , _A ): UpperCAmelCase__ : Dict = value UpperCamelCase__ = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' UpperCamelCase__ = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' 
, __a , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class lowerCamelCase_ ( __a ): def __init__( self : Union[str, Any] , _A : Tuple ): '''simple docstring''' super().__init__(_A ) UpperCAmelCase__ : Union[str, Any] = config UpperCAmelCase__ : Optional[int] = RegNetEmbeddings(_A ) UpperCAmelCase__ : Dict = RegNetEncoder(_A ) UpperCAmelCase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowercase_ ( self : List[Any] , _A : Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase__ : str = self.embedder(_A ) UpperCAmelCase__ : Optional[Any] = self.encoder( _A , output_hidden_states=_A , return_dict=_A ) UpperCAmelCase__ : str = encoder_outputs[0] UpperCAmelCase__ : Union[str, Any] = self.pooler(_A ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __a , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class lowerCamelCase_ ( __a ): def __init__( self : Any , _A : Tuple ): '''simple docstring''' super().__init__(_A ) UpperCAmelCase__ : Union[str, Any] = config.num_labels UpperCAmelCase__ : Union[str, Any] = RegNetModel(_A ) # classification head UpperCAmelCase__ : str = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowercase_ ( self : List[str] , _A : Optional[torch.FloatTensor] = None , _A : Optional[torch.LongTensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , ): '''simple docstring''' UpperCAmelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase__ : Union[str, Any] = self.regnet(_A , output_hidden_states=_A , return_dict=_A ) UpperCAmelCase__ : Tuple = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase__ : Dict = self.classifier(_A ) UpperCAmelCase__ : str = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase__ : Optional[Any] = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase__ : List[Any] = '''single_label_classification''' else: UpperCAmelCase__ : Any = '''multi_label_classification''' if self.config.problem_type == "regression": UpperCAmelCase__ : Union[str, Any] = MSELoss() if self.num_labels == 1: UpperCAmelCase__ : Tuple = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCAmelCase__ : Any = loss_fct(_A , _A ) 
elif self.config.problem_type == "single_label_classification": UpperCAmelCase__ : Any = CrossEntropyLoss() UpperCAmelCase__ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase__ : List[str] = BCEWithLogitsLoss() UpperCAmelCase__ : List[str] = loss_fct(_A , _A ) if not return_dict: UpperCAmelCase__ : Optional[int] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
75
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': 
'''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } UpperCamelCase__ = { '''facebook/bart-base''': 1_0_2_4, '''facebook/bart-large''': 1_0_2_4, '''facebook/bart-large-mnli''': 1_0_2_4, '''facebook/bart-large-cnn''': 1_0_2_4, '''facebook/bart-large-xsum''': 1_0_2_4, '''yjernite/bart_eli5''': 1_0_2_4, } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = BartTokenizer def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ): '''simple docstring''' super().__init__( _A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , ) UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) ) UpperCAmelCase__ 
: Any = add_prefix_space UpperCAmelCase__ : str = pre_tok_class(**_A ) UpperCAmelCase__ : Dict = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase__ : Optional[Any] = '''post_processor''' UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A ) if tokenizer_component_instance: UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] ) if "cls" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] ) UpperCAmelCase__ : Dict = False if state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : Union[str, Any] = add_prefix_space UpperCAmelCase__ : Dict = True if state.get('''trim_offsets''' , _A ) != trim_offsets: UpperCAmelCase__ : List[Any] = trim_offsets UpperCAmelCase__ : List[Any] = True if changes_to_apply: UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) ) UpperCAmelCase__ : Union[str, Any] = component_class(**_A ) setattr(self.backend_tokenizer , _A , _A ) @property def lowercase_ ( self : Dict ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value UpperCAmelCase__ : str = value def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) 
return super()._batch_encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A ) return tuple(_A ) def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ): '''simple docstring''' UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] UpperCAmelCase__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
75
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = StableDiffusionPanoramaPipeline lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def lowercase_ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) UpperCAmelCase__ : Optional[Any] = DDIMScheduler() torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase__ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) UpperCAmelCase__ : Any = 
CLIPTextModel(_A ) UpperCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase__ : Any = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowercase_ ( self : Any , _A : Optional[Any] , _A : Dict=0 ): '''simple docstring''' UpperCAmelCase__ : Tuple = torch.manual_seed(_A ) UpperCAmelCase__ : Optional[Any] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. '''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : Tuple = self.get_dummy_components() UpperCAmelCase__ : str = StableDiffusionPanoramaPipeline(**_A ) UpperCAmelCase__ : int = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(_A ) UpperCAmelCase__ : int = sd_pipe(**_A ).images UpperCAmelCase__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Dict = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : Tuple ): '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase_ ( self : int ): '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator 
UpperCAmelCase__ : Optional[int] = self.get_dummy_components() UpperCAmelCase__ : Dict = StableDiffusionPanoramaPipeline(**_A ) UpperCAmelCase__ : Optional[Any] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : List[str] = self.get_dummy_inputs(_A ) UpperCAmelCase__ : List[Any] = '''french fries''' UpperCAmelCase__ : Tuple = sd_pipe(**_A , negative_prompt=_A ) UpperCAmelCase__ : Tuple = output.images UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : List[Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase__ : str = StableDiffusionPanoramaPipeline(**_A ) UpperCAmelCase__ : Optional[int] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : str = self.get_dummy_inputs(_A ) UpperCAmelCase__ : int = sd_pipe(**_A , view_batch_size=2 ) UpperCAmelCase__ : Optional[int] = output.images UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : int = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : Optional[Any] = self.get_dummy_components() UpperCAmelCase__ : List[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' ) UpperCAmelCase__ : List[str] = 
StableDiffusionPanoramaPipeline(**_A ) UpperCAmelCase__ : Union[str, Any] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(_A ) UpperCAmelCase__ : Optional[int] = sd_pipe(**_A ).images UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : List[Any] = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : Tuple = self.get_dummy_components() UpperCAmelCase__ : List[str] = PNDMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , skip_prk_steps=_A ) UpperCAmelCase__ : Optional[int] = StableDiffusionPanoramaPipeline(**_A ) UpperCAmelCase__ : str = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Tuple = self.get_dummy_inputs(_A ) UpperCAmelCase__ : Optional[int] = sd_pipe(**_A ).images UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : int = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : int , _A : Tuple=0 ): '''simple docstring''' UpperCAmelCase__ : str = torch.manual_seed(_A ) UpperCAmelCase__ : str = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def lowercase_ ( self 
: Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-base''' UpperCAmelCase__ : Optional[Any] = DDIMScheduler.from_pretrained(_A , subfolder='''scheduler''' ) UpperCAmelCase__ : Dict = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() UpperCAmelCase__ : Tuple = self.get_inputs() UpperCAmelCase__ : Dict = pipe(**_A ).images UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2_048, 3) UpperCAmelCase__ : Optional[Any] = np.array( [ 0.3_6_9_6_8_3_9_2, 0.2_7_0_2_5_3_7_2, 0.3_2_4_4_6_7_6_6, 0.2_8_3_7_9_3_8_7, 0.3_6_3_6_3_2_7_4, 0.3_0_7_3_3_3_4_7, 0.2_7_1_0_0_0_2_7, 0.2_7_0_5_4_1_2_5, 0.2_5_5_3_6_0_9_6, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=_A ) UpperCAmelCase__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() UpperCAmelCase__ : Union[str, Any] = self.get_inputs() UpperCAmelCase__ : Any = pipe(**_A ).images UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2_048, 3) UpperCAmelCase__ : int = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = 0 def callback_fn(_A : int , _A : int , _A : torch.FloatTensor ) -> None: UpperCAmelCase__ : str = True nonlocal number_of_steps number_of_steps += 1 if step == 1: UpperCAmelCase__ : List[str] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) UpperCAmelCase__ : str = latents[0, -3:, -3:, -1] 
UpperCAmelCase__ : Tuple = np.array( [ 0.1_8_6_8_1_8_6_9, 0.3_3_9_0_7_8_1_6, 0.5_3_6_1_2_7_6, 0.1_4_4_3_2_8_6_5, -0.0_2_8_5_6_6_1_1, -0.7_3_9_4_1_1_2_3, 0.2_3_3_9_7_9_8_7, 0.4_7_3_2_2_6_8_2, -0.3_7_8_2_3_1_6_4, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: UpperCAmelCase__ : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) UpperCAmelCase__ : int = latents[0, -3:, -3:, -1] UpperCAmelCase__ : Union[str, Any] = np.array( [ 0.1_8_5_3_9_6_4_5, 0.3_3_9_8_7_2_4_8, 0.5_3_7_8_5_5_9, 0.1_4_4_3_7_1_4_2, -0.0_2_4_5_5_2_6_1, -0.7_3_3_8_3_1_7, 0.2_3_9_9_0_7_5_5, 0.4_7_3_5_6_2_7_2, -0.3_7_8_6_5_0_5, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-base''' UpperCAmelCase__ : List[Any] = DDIMScheduler.from_pretrained(_A , subfolder='''scheduler''' ) UpperCAmelCase__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A ) UpperCAmelCase__ : Tuple = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() UpperCAmelCase__ : Optional[Any] = self.get_inputs() pipe(**_A , callback=_A , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowercase_ ( self : Tuple ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-base''' UpperCAmelCase__ : List[Any] = DDIMScheduler.from_pretrained(_A , subfolder='''scheduler''' ) UpperCAmelCase__ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A ) UpperCAmelCase__ : Tuple = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase__ : Dict = self.get_inputs() UpperCAmelCase__ : 
Optional[Any] = pipe(**_A ) UpperCAmelCase__ : Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
75
'''Fisher-Yates shuffle demo script.

Shuffles a list in place and returns the same list object.
'''
import random
from typing import Any


def a__ ( lowerCAmelCase__ ) -> list[Any]:
    """Shuffle the given list in place using the Fisher-Yates algorithm.

    Walks the list from the last index down to 1 and swaps each position
    with a uniformly chosen index at or below it, which produces a uniformly
    random permutation.  (The previous implementation performed n random
    transpositions between two independent random indices — that is not the
    Fisher-Yates algorithm and does not yield a uniform shuffle.)

    Returns the same (now shuffled) list object.
    """
    for i in range(len(lowerCAmelCase__ ) - 1, 0, -1):
        # randint's upper bound is inclusive, so position i itself stays eligible.
        j = random.randint(0, i)
        lowerCAmelCase__[i], lowerCAmelCase__[j] = lowerCAmelCase__[j], lowerCAmelCase__[i]
    return lowerCAmelCase__


if __name__ == "__main__":
    # The original demo referenced undefined names (``integers``, ``strings``,
    # ``fisher_yates_shuffle``) and bound both sample lists to the same
    # constant; bind them explicitly so the script actually runs.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', a__(integers), a__(strings))
75
1
'''Convert an Alexa Bort checkpoint (gluonnlp / MXNet) into a Hugging Face
PyTorch ``BertForMaskedLM`` checkpoint and verify both models produce the
same output tensors on a sample sentence.

NOTE(review): this sample was machine-obfuscated — the converter's two
parameters share one name (a SyntaxError) and all locals were collapsed onto
``UpperCAmelCase__``/``lowerCAmelCase__``, so the script is not runnable
as-is.  The comments below document the intended flow only.
'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


# The conversion pokes at gluonnlp/mxnet internals, so both versions are pinned.
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
    raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
    raise Exception('''requires mxnet == 1.5.0''')

logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
# Sample sentence used at the end to compare gluon vs. transformers outputs.
UpperCamelCase__ = '''The Nymphenburg Palace is a beautiful palace in Munich!'''


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
    '''Convert the Bort checkpoint (first argument) into a PyTorch model saved
    under the output folder (second argument), then sanity-check the outputs.

    NOTE(review): duplicate parameter names above are an obfuscation artifact.
    '''
    # Hyper-parameters of the released Bort: 4 layers, 8 heads, 768 hidden, 1024 embed.
    UpperCAmelCase__ : Optional[Any] = {
        '''attention_cell''': '''multi_head''',
        '''num_layers''': 4,
        '''units''': 10_24,
        '''hidden_size''': 7_68,
        '''max_length''': 5_12,
        '''num_heads''': 8,
        '''scaled''': True,
        '''dropout''': 0.1,
        '''use_residual''': True,
        '''embed_size''': 10_24,
        '''embed_dropout''': 0.1,
        '''word_embed''': None,
        '''layer_norm_eps''': 1E-5,
        '''token_type_vocab_size''': 2,
    }
    # NOTE(review): ``bort_4_8_768_1024_hparams`` is unresolved here — the dict
    # above was presumably meant to be bound to that name before obfuscation.
    UpperCAmelCase__ : int = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    UpperCAmelCase__ : Tuple = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] ,
        num_layers=predefined_args['''num_layers'''] ,
        units=predefined_args['''units'''] ,
        hidden_size=predefined_args['''hidden_size'''] ,
        max_length=predefined_args['''max_length'''] ,
        num_heads=predefined_args['''num_heads'''] ,
        scaled=predefined_args['''scaled'''] ,
        dropout=predefined_args['''dropout'''] ,
        output_attention=lowerCAmelCase__ ,
        output_all_encodings=lowerCAmelCase__ ,
        use_residual=predefined_args['''use_residual'''] ,
        activation=predefined_args.get('''activation''' , '''gelu''' ) ,
        layer_norm_eps=predefined_args.get('''layer_norm_eps''' , lowerCAmelCase__ ) ,
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    UpperCAmelCase__ : Union[str, Any] = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    UpperCAmelCase__ : Tuple = os.path.join(get_home_dir() , '''models''' )
    UpperCAmelCase__ : Tuple = _load_vocab(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , cls=lowerCAmelCase__ )
    UpperCAmelCase__ : List[str] = nlp.model.BERTModel(
        lowerCAmelCase__ ,
        len(lowerCAmelCase__ ) ,
        units=predefined_args['''units'''] ,
        embed_size=predefined_args['''embed_size'''] ,
        embed_dropout=predefined_args['''embed_dropout'''] ,
        word_embed=predefined_args['''word_embed'''] ,
        use_pooler=lowerCAmelCase__ ,
        use_token_type_embed=lowerCAmelCase__ ,
        token_type_vocab_size=predefined_args['''token_type_vocab_size'''] ,
        use_classifier=lowerCAmelCase__ ,
        use_decoder=lowerCAmelCase__ ,
    )
    original_bort.load_parameters(lowerCAmelCase__ , cast_dtype=lowerCAmelCase__ , ignore_extra=lowerCAmelCase__ )
    # Flat prefix -> parameter mapping of the gluon model.
    UpperCAmelCase__ : Optional[Any] = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    UpperCAmelCase__ : List[str] = {
        '''architectures''': ['''BertForMaskedLM'''],
        '''attention_probs_dropout_prob''': predefined_args['''dropout'''],
        '''hidden_act''': '''gelu''',
        '''hidden_dropout_prob''': predefined_args['''dropout'''],
        '''hidden_size''': predefined_args['''embed_size'''],
        '''initializer_range''': 0.0_2,
        '''intermediate_size''': predefined_args['''hidden_size'''],
        '''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
        '''max_position_embeddings''': predefined_args['''max_length'''],
        '''model_type''': '''bort''',
        '''num_attention_heads''': predefined_args['''num_heads'''],
        '''num_hidden_layers''': predefined_args['''num_layers'''],
        '''pad_token_id''': 1,  # 2 = BERT, 1 = RoBERTa
        '''type_vocab_size''': 1,  # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(lowerCAmelCase__ ),
    }
    UpperCAmelCase__ : Tuple = BertConfig.from_dict(lowerCAmelCase__ )
    UpperCAmelCase__ : Optional[Any] = BertForMaskedLM(lowerCAmelCase__ )
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(lowerCAmelCase__ ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(lowerCAmelCase__ , lowerCAmelCase__ ):
        UpperCAmelCase__ : Optional[Any] = hf_param.shape
        UpperCAmelCase__ : Optional[int] = to_torch(params[gluon_param] )
        UpperCAmelCase__ : List[str] = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
        return gluon_param

    # Embedding tables and embedding LayerNorm.
    UpperCAmelCase__ : Dict = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    UpperCAmelCase__ : Union[str, Any] = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    UpperCAmelCase__ : Any = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    UpperCAmelCase__ : Optional[int] = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    UpperCAmelCase__ : Any = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    # Map every encoder layer following the table above.
    for i in range(hf_bort_config.num_hidden_layers ):
        UpperCAmelCase__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        UpperCAmelCase__ : BertSelfAttention = layer.attention.self
        UpperCAmelCase__ : Optional[Any] = check_and_map_params(
            self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
        UpperCAmelCase__ : Optional[Any] = check_and_map_params(
            self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
        UpperCAmelCase__ : Optional[Any] = check_and_map_params(
            self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
        UpperCAmelCase__ : Dict = check_and_map_params(
            self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
        UpperCAmelCase__ : Any = check_and_map_params(
            self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
        UpperCAmelCase__ : Optional[Any] = check_and_map_params(
            self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
        # self attention output
        UpperCAmelCase__ : BertSelfOutput = layer.attention.output
        UpperCAmelCase__ : List[Any] = check_and_map_params(
            self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
        UpperCAmelCase__ : Tuple = check_and_map_params(
            self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
        UpperCAmelCase__ : Optional[int] = check_and_map_params(
            self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
        UpperCAmelCase__ : Optional[Any] = check_and_map_params(
            self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
        # intermediate
        UpperCAmelCase__ : BertIntermediate = layer.intermediate
        UpperCAmelCase__ : List[Any] = check_and_map_params(
            intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
        UpperCAmelCase__ : List[str] = check_and_map_params(
            intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
        # output
        UpperCAmelCase__ : BertOutput = layer.output
        UpperCAmelCase__ : str = check_and_map_params(
            bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
        UpperCAmelCase__ : Tuple = check_and_map_params(
            bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
        UpperCAmelCase__ : Tuple = check_and_map_params(
            bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
        UpperCAmelCase__ : Any = check_and_map_params(
            bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    UpperCAmelCase__ : Optional[int] = RobertaTokenizer.from_pretrained('''roberta-base''' )
    UpperCAmelCase__ : Tuple = tokenizer.encode_plus(lowerCAmelCase__ )['''input_ids''']
    # Get gluon output
    UpperCAmelCase__ : Union[str, Any] = mx.nd.array([input_ids] )
    UpperCAmelCase__ : Optional[int] = original_bort(inputs=lowerCAmelCase__ , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(lowerCAmelCase__ )
    UpperCAmelCase__ : List[Any] = BertModel.from_pretrained(lowerCAmelCase__ )
    hf_bort_model.eval()
    UpperCAmelCase__ : Union[str, Any] = tokenizer.encode_plus(lowerCAmelCase__ , return_tensors='''pt''' )
    UpperCAmelCase__ : List[str] = hf_bort_model(**lowerCAmelCase__ )[0]
    UpperCAmelCase__ : List[str] = output_gluon[0].asnumpy()
    UpperCAmelCase__ : Any = output_hf[0].detach().numpy()
    UpperCAmelCase__ : Any = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    UpperCAmelCase__ : List[Any] = np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
    if success:
        print('''✔️ Both model do output the same tensors''' )
    else:
        print('''❌ Both model do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , lowerCAmelCase__ )


if __name__ == "__main__":
    UpperCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    UpperCamelCase__ = parser.parse_args()
    # NOTE(review): ``convert_bort_checkpoint_to_pytorch`` / ``args`` are
    # unresolved obfuscation artifacts (the converter is defined as ``a__``).
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
75
'''Segmented Sieve of Eratosthenes.'''
import math


def a__ ( lowerCAmelCase__ ) -> list[int]:
    """Return every prime number <= ``lowerCAmelCase__`` in ascending order.

    Uses a segmented sieve: a plain sieve finds the base primes up to
    sqrt(n), then windows of width sqrt(n) are sieved one at a time, so
    memory stays O(sqrt(n)) instead of O(n).
    """
    limit = lowerCAmelCase__
    if limit < 2:  # also guards math.sqrt against negative input
        return []
    segment = int(math.sqrt(limit))
    # Base sieve over [2, segment].
    base = [True] * (segment + 1)
    primes: list[int] = []
    for num in range(2, segment + 1):
        if base[num]:
            primes.append(num)
            for mult in range(num * num, segment + 1, num):
                base[mult] = False
    result = list(primes)
    # Sieve each window [low, high] of width ``segment``.
    low = segment + 1
    high = min(2 * segment, limit)
    while low <= limit:
        flags = [True] * (high - low + 1)
        for p in primes:
            # First multiple of p inside the window (never p itself, since low > p).
            first = max(p * p, -(-low // p) * p)
            for mult in range(first, high + 1, p):
                flags[mult - low] = False
        result.extend(low + idx for idx, alive in enumerate(flags) if alive)
        low = high + 1
        high = min(high + segment, limit)
    return result


if __name__ == "__main__":
    # The original module-level call referenced the undefined name ``sieve``
    # and ran on import; call the defined function behind a main guard.
    print(a__(10**6))
75
1
'''Vegetation / spectral index calculator.

Computes common remote-sensing vegetation indices (NDVI, EVI, SAVI variants,
hue, saturation, ...) from per-band matrices (numpy arrays) of a
multispectral image: red, green, blue, red-edge and near-infrared.
'''
# Imports
import numpy as np


class lowerCamelCase_ :
    """Holds the spectral band matrices and exposes one method per index.

    NOTE(review): the obfuscated original gave every ``__init__`` /
    ``set_matricies`` parameter the same name (a SyntaxError) and renamed
    every method ``lowercase_``; the parameter and method names below were
    restored from the dispatch table in ``calculation``, which references
    ``self.arvaa``, ``self.ndvi``, etc. and is the ground truth for the
    intended names.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        # Bands default to None; only the ones actually provided are stored.
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update any subset of the stored band matrices; returns True."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Compute the index named ``index`` (e.g. ``'NDVI'``).

        Optionally updates the stored bands first.  Prints a message and
        returns False for an unknown index name.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            '''ARVI2''': self.arvaa,
            '''CCCI''': self.ccci,
            '''CVI''': self.cvi,
            '''GLI''': self.gli,
            '''NDVI''': self.ndvi,
            '''BNDVI''': self.bndvi,
            '''redEdgeNDVI''': self.red_edge_ndvi,
            '''GNDVI''': self.gndvi,
            '''GBNDVI''': self.gbndvi,
            '''GRNDVI''': self.grndvi,
            '''RBNDVI''': self.rbndvi,
            '''PNDVI''': self.pndvi,
            '''ATSAVI''': self.atsavi,
            '''BWDRVI''': self.bwdrvi,
            '''CIgreen''': self.ci_green,
            '''CIrededge''': self.ci_rededge,
            '''CI''': self.ci,
            '''CTVI''': self.ctvi,
            '''GDVI''': self.gdvi,
            '''EVI''': self.evi,
            '''GEMI''': self.gemi,
            '''GOSAVI''': self.gosavi,
            '''GSAVI''': self.gsavi,
            '''Hue''': self.hue,
            '''IVI''': self.ivi,
            '''IPVI''': self.ipvi,
            '''I''': self.i,
            '''RVI''': self.rvi,
            '''MRVI''': self.mrvi,
            '''MSAVI''': self.m_savi,
            '''NormG''': self.norm_g,
            '''NormNIR''': self.norm_nir,
            '''NormR''': self.norm_r,
            '''NGRDI''': self.ngrdi,
            '''RI''': self.ri,
            '''S''': self.s,
            '''IF''': self._if,
            '''DVI''': self.dvi,
            '''TVI''': self.tvi,
            '''NDRE''': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('''Index not in the list!''')
            return False

    def arvaa(self):
        # ARVI2: atmospherically resistant vegetation index 2
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        # Canopy chlorophyll content index
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        # Chlorophyll vegetation index
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        # Green leaf index
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        # Normalized difference vegetation index
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        # Blue-normalized difference vegetation index
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        # Adjusted transformed soil-adjusted VI; a/b are the soil-line slope/intercept.
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        # Coloration index
        return (self.red - self.blue) / self.red

    def ctvi(self):
        # Corrected transformed vegetation index
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        # Green difference vegetation index
        return self.nir - self.green

    def evi(self):
        # Enhanced vegetation index
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        # Intensity
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        # Ratio vegetation index
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        # Modified soil-adjusted vegetation index
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        # Redness index
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        # Saturation over the RGB bands
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        # Shape/intensity factor (named ``IF`` in the dispatch table)
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        # Transformed vegetation index
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        # Normalized difference red-edge index
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
75
'''Tests for ``StableDiffusionInpaintPipeline``: a fast CPU smoke test on tiny
dummy components, plus slow GPU integration tests against reference images.

NOTE(review): this sample was machine-obfuscated — mixin base classes appear
as ``__a``, duplicate ``_A`` parameter names make some defs a SyntaxError,
and locals collapsed onto ``UpperCAmelCase__`` leave later references
(``unet``, ``sd_pipe``, ``pipe``, ...) unresolved.  Comments document the
intended flow only.
'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
    # Pipeline class and parameter sets exercised by the shared tester mixins.
    lowerCAmelCase__ = StableDiffusionInpaintPipeline
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowerCAmelCase__ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowerCAmelCase__ = frozenset([] )

    def lowercase_ ( self : Optional[int] ):
        '''Build tiny, seeded dummy components (UNet, scheduler, VAE, CLIP) for fast CPU tests.'''
        torch.manual_seed(0 )
        UpperCAmelCase__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=9 ,  # 4 latent + 4 masked-image latent + 1 mask channel for inpainting
            out_channels=4 ,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,
            cross_attention_dim=32 ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=_A ,
        )
        UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
        torch.manual_seed(0 )
        UpperCAmelCase__ : str = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1e-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act='''gelu''' ,
            projection_dim=512 ,
        )
        UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
        UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        UpperCAmelCase__ : str = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
        '''Build deterministic pipeline inputs: a random init image, a derived mask and a seeded generator.'''
        UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
        UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
        # MPS has no per-device torch.Generator, hence the branch.
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
        else:
            UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase__ : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowercase_ ( self : Union[str, Any] ):
        '''Fast CPU smoke test: pipeline runs and the corner slice matches the golden values.'''
        UpperCAmelCase__ : Dict = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase__ : Tuple = self.get_dummy_components()
        UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
        UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
        UpperCAmelCase__ : Any = sd_pipe(**_A ).images
        UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self : Tuple ):
        '''Batch-of-one output must match the single-sample output (mixin helper).'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    def lowercase_ ( self : List[Any] ):
        '''Free GPU memory between the slow integration tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self : Optional[int] ):
        '''fp32 integration test against the reference inpainting output.'''
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : List[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''' )
        UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : str = torch.manual_seed(0 )
        UpperCAmelCase__ : str = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : int = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def lowercase_ ( self : Any ):
        '''fp16 integration test; looser tolerance than the fp32 variant.'''
        UpperCAmelCase__ : List[str] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
        UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
            _A , torch_dtype=torch.floataa , safety_checker=_A , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : Tuple = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def lowercase_ ( self : Any ):
        '''With attention slicing + sequential CPU offload, peak GPU memory stays under ~2.65 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase__ : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
        UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
        UpperCAmelCase__ : Any = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
        UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.6_5 * 10**9
75
1
"""Tests for ``make_student.create_student_by_copying_alternating_layers``.

NOTE(review): names were reconstructed from call sites — the obfuscated file
bound both checkpoint constants to the same name (``UpperCamelCase__``), named
every method ``lowercase_`` (so only the last survived, and none was
discovered by unittest), and assigned results to throwaway names while
reading ``student`` / ``self.teacher_config``.
"""
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        # The tiny BART config drives the layer-count assertions below.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # Must not raise even when the decoder depth is left unspecified.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        # Unspecified decoder depth should copy the teacher's depth.
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # e=None together with d=None is invalid input.
        # NOTE(review): exception type reconstructed — confirm against make_student.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
75
"""Flax Blenderbot model tests.

NOTE(review): this module was recovered from obfuscated code in which every
definition was renamed (``a__``, ``lowerCamelCase_``, ``lowercase_``) while
call sites kept the real names (e.g. ``FlaxBlenderbotModelTester(self)``,
``self.model_tester.check_use_cache_forward(...)``, ``self.vocab_size``), so
the file raised NameError as written.  Names are restored from those call
sites; all literals and assertions are preserved.
"""
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # BUG FIX(review): the obfuscated file assigned "platform" to a dead
    # constant instead of the environment variable — confirm intended key.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the model-input dict, deriving masks from the pad token when absent."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE: mirrors the original code, which reuses the *encoder* mask here.
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    """Builds tiny configs/inputs and shared cache-consistency checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # Clip ids to >= 3 so special tokens never appear mid-sequence, then
        # append an EOS (2) column.
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            # NOTE(review): original obfuscated value unrecoverable; False
            # matches the sibling Flax seq2seq testers — confirm.
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental decoding with a cache must match one-shot decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        # 13 short sequences, each EOS(2)-terminated; row 5 carries padding.
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting consumes exactly one pad token...
        self.assertEqual(n_pad_after, n_pad_before - 1)
        # ...and prepends the decoder start token (2) to every row.
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    # BUG FIX(review): the obfuscated file bound all three class attributes to
    # the same name, so the mixins could never read them.
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
75
1
"""Next-greatest-element: for each item, the first later item that is larger.

BUG FIX(review): the obfuscated file defined all three functions as ``a__``
(each shadowing the previous) and bound both module constants to the same
name, while the __main__ block called ``next_greatest_element_slow`` /
``..._fast`` / ``next_greatest_element`` and referenced ``arr`` and
``setup`` — every call raised NameError.  Names restored from those call
sites; logic unchanged.
"""
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) brute force: scan forward for the first larger element.

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but with enumerate/slicing instead of indices.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack solution, scanning right to left.

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Pop everything not greater than the current element; the
            # survivor (if any) is the next greatest.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
75
"""Tests for datasets' Apache Beam based dataset builders.

NOTE(review): class/function names were reconstructed from call sites
(``DummyBeamDataset``, ``NestedBeamDataset``, ``get_test_dummy_examples``,
``get_test_nested_examples``) — in the obfuscated file both builders shared
one class name and all methods were named ``lowercase_``.
"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a flat string feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested sequence feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while keeping the real writer.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # BUG FIX(review): this check previously re-tested shard 00000;
            # the second shard (00001) is the one that should be verified.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
75
1
"""Uni- and bidirectional breadth-first search on a 2-D obstacle grid.

BUG FIX(review): the obfuscated file referenced ``Node``, ``grid``, ``init``
and ``goal`` while the definitions carried throwaway names, so the script
raised NameError.  Names restored from the call sites; logic unchanged.
Coordinates are (y, x) tuples; 0 cells are free, 1 cells are obstacles.
"""
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # start/goal are (y, x); Node takes (x, y), hence the index swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            # No path found: degenerate path containing only the start.
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Expand the in-bounds, unblocked neighbors of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk the parent chain back to the start and return it start-first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            # Retarget each frontier at the other's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        # Drop the duplicated meeting node from the backward half, then flip
        # it so the concatenation runs start -> meeting -> goal.
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
75
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# BUG FIX(review): all four functions were defined as ``a__`` (shadowing one
# another) while ``main`` called ``config_command_parser``/``config_command``
# and ``set_defaults`` needed ``config_command`` — every call raised
# NameError.  Names restored from the call sites; logic unchanged.
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input

description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """Prompt for the compute environment and run the matching questionnaire."""
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the `accelerate config` argument parser (optionally as a subcommand)."""
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the questionnaire and persist the resulting config (YAML or JSON)."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
1
"""Generate the harmonic series 1, 1/2, 1/3, ..., 1/n as strings.

BUG FIX(review): the function was defined as ``a__`` and the input bound to
a throwaway name, while the __main__ block called ``harmonic_series(nth_term)``
— both raised NameError.  Names restored from the call sites.
"""


def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` harmonic-series terms as strings.

    The argument arrives from ``input()`` as a string; an empty string
    yields an empty list.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    >>> harmonic_series("")
    []
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # First term is "1"; subsequent terms are "1/k".
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
75
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: # Construct model if gpta_config_file == "": UpperCAmelCase__ : Optional[Any] = GPTaConfig() else: UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ ) # Load weights from numpy load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowerCAmelCase__ ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) UpperCamelCase__ = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
75
1
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase__ = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple: if attention_mask is None: UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , 
_A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : str = is_training UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : Union[str, Any] = max_position_embeddings UpperCAmelCase__ : int = eos_token_id UpperCAmelCase__ : Optional[int] = pad_token_id UpperCAmelCase__ : List[str] = bos_token_id UpperCAmelCase__ : Union[str, Any] = initializer_range def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , ) UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A ) return config, inputs_dict def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = 20 UpperCAmelCase__ : int = model_class_name(_A ) UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : str = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Tuple = model.decode( decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , ) UpperCAmelCase__ : int = model.decode(_A , _A ) UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ): '''simple docstring''' 
UpperCAmelCase__ : Tuple = 20 UpperCAmelCase__ : Optional[int] = model_class_name(_A ) UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : int = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Any = model.decode( decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A ) UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class lowerCamelCase_ ( unittest.TestCase ): lowerCAmelCase__ = 9_9 def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 
2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) UpperCAmelCase__ : int = input_ids.shape[0] UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data() UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A ) UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A ) UpperCAmelCase__ : int = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum() UpperCAmelCase__ : Dict = np.equal(_A , 1 
).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_A , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase_ ( __a , unittest.TestCase , __a ): lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_A , _A , _A ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A ) UpperCAmelCase__ : str = model_class(_A ) @jax.jit def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ): return model.encode(input_ids=_A , attention_mask=_A ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def 
lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : List[str] = model_class(_A ) UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) UpperCAmelCase__ : Tuple = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ): return model.decode( decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self : List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id UpperCAmelCase__ : Union[str, Any] = model(_A ) self.assertIsNotNone(_A ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} UpperCAmelCase__ : str = 
FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A ) UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) UpperCAmelCase__ : Optional[Any] = ['''Sam'''] UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' ) UpperCAmelCase__ : List[str] = model.generate(**_A , **_A ) UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.''' UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A ) assert generated_txt[0].strip() == tgt_text
75
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase_ : def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : str = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : List[str] = use_input_mask UpperCAmelCase__ : Tuple = use_token_type_ids 
UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Any = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : List[str] = coordinate_size UpperCAmelCase__ : Tuple = shape_size UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[Any] = num_choices UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase__ : str = text_seq_length UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1 UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCAmelCase__ : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase__ : str = bbox[i, j, 3] UpperCAmelCase__ : Dict = bbox[i, j, 1] UpperCAmelCase__ : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase__ : Optional[int] = bbox[i, j, 2] UpperCAmelCase__ : Any = bbox[i, j, 0] UpperCAmelCase__ : List[Any] = tmp_coordinate UpperCAmelCase__ : str = tf.constant(_A ) UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 
self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Any = None if self.use_input_mask: UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase__ : Any = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : List[str] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A ) # text + image UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A ) UpperCAmelCase__ : Tuple = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , ) UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) # text only UpperCAmelCase__ : Any = model(_A , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.num_labels UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A ) UpperCAmelCase__ : Union[str, Any] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.num_labels UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A ) UpperCAmelCase__ : Optional[int] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : str = 2 UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A ) UpperCAmelCase__ : str = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs UpperCAmelCase__ : List[Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ): '''simple docstring''' return True def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(_A ) if model_class in get_values(_A ): UpperCAmelCase__ : Tuple = { k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_A , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
elif model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = model_class(_A ) if getattr(_A , '''hf_compute_loss''' , _A ): # The number of elements in the loss should be the same as the number of elements in the label UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : List[Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0] ] UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) UpperCAmelCase__ : List[Any] = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy() if 
len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCAmelCase__ : Any = -100 UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A ) UpperCAmelCase__ : int = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Dict = model(_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) # Get keys that were added with the _prepare_for_class function UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys() UpperCAmelCase__ : int = inspect.signature(model.call ).parameters UpperCAmelCase__ : Union[str, Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCAmelCase__ : Dict = {0: '''input_ids'''} for label_key in label_keys: UpperCAmelCase__ : str = signature_names.index(_A ) UpperCAmelCase__ : List[Any] = label_key UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCAmelCase__ : Tuple = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCAmelCase__ : Any = prepared_for_class[value] UpperCAmelCase__ : Tuple = tuple(_A ) # Send to model UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase_ ( self : int ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ 
) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Union[str, Any] = type self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : List[str] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _A , _A , _A , _A , _A , _A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple 
docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> List[str]: UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Dict ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) UpperCAmelCase__ : Dict = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values UpperCAmelCase__ : str = tf.constant([[1, 2]] ) UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A ) # verify the logits UpperCAmelCase__ : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , _A ) UpperCAmelCase__ : Dict = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
75
1
"""Convert original MaskFormer (Swin-backbone) checkpoints to the 🤗 Transformers format.

The original checkpoint is a pickled dict of numpy arrays; this script renames its keys
to the HF layout, splits the fused qkv projections, verifies the logits on a sample
image and optionally saves / pushes the converted model.
"""

import argparse
import json
import pickle
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    """Build a MaskFormerConfig (Swin-tiny backbone) whose label set matches *model_name*."""
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    # NOTE: the "ade20k-full" test must come before "ade", and "coco-stuff" before "coco".
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    """Return the list of (original_key, hf_key) pairs for all directly-mapped weights.

    The fused qkv projections are handled separately by ``read_in_swin_q_k_v`` /
    ``read_in_decoder_q_k_v``.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias"))
    # stages: per-block suffix mapping (original -> HF) inside each Swin layer
    block_suffixes = [
        ("norm1.weight", "layernorm_before.weight"),
        ("norm1.bias", "layernorm_before.bias"),
        ("attn.relative_position_bias_table", "attention.self.relative_position_bias_table"),
        ("attn.relative_position_index", "attention.self.relative_position_index"),
        ("attn.proj.weight", "attention.output.dense.weight"),
        ("attn.proj.bias", "attention.output.dense.bias"),
        ("norm2.weight", "layernorm_after.weight"),
        ("norm2.bias", "layernorm_after.bias"),
        ("mlp.fc1.weight", "intermediate.dense.weight"),
        ("mlp.fc1.bias", "intermediate.dense.bias"),
        ("mlp.fc2.weight", "output.dense.weight"),
        ("mlp.fc2.bias", "output.dense.bias"),
    ]
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            for src, dst in block_suffixes:
                rename_keys.append((f"backbone.layers.{i}.blocks.{j}.{src}", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.{dst}"))
        if i < 3:
            # the last stage has no downsample layer
            rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))

    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias"))
    # original numbers FPN layers 3..1 top-down; HF numbers them 0..2
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias"))
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight"))
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias"))

    # Transformer decoder: per-layer submodule mapping (original -> HF), each with weight + bias
    decoder_submodules = [
        ("self_attn.out_proj", "self_attn.out_proj"),       # self-attention out projection
        ("multihead_attn.out_proj", "encoder_attn.out_proj"),  # cross-attention out projection
        ("linear1", "fc1"),                                 # MLP 1
        ("linear2", "fc2"),                                 # MLP 2
        ("norm1", "self_attn_layer_norm"),                  # layernorm 1 (self-attention layernorm)
        ("norm2", "encoder_attn_layer_norm"),               # layernorm 2 (cross-attention layernorm)
        ("norm3", "final_layer_norm"),                      # layernorm 3 (final layernorm)
    ]
    for idx in range(config.decoder_config.decoder_layers):
        for src, dst in decoder_submodules:
            for param in ("weight", "bias"):
                rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.{src}.{param}", f"model.transformer_module.decoder.layers.{idx}.{dst}.{param}"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias"))

    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias"))
    for i in range(3):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight"))
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias"))
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each Swin block's fused ``attn.qkv`` matrix into separate q/k/v entries."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    """Split the transformer decoder's fused ``in_proj`` matrices into q/k/v entries."""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on


def prepare_img() -> torch.Tensor:
    """Download the standard COCO cats test image used to verify conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert *checkpoint_path* to the HF format, verify it, and optionally save/push it."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    # only the final backbone layernorm is expected to be missing; nothing may be left over
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        # fixed: help was a one-element tuple (trailing comma), which argparse renders incorrectly
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
75
"""Open the top Google result for a command-line query in the default browser.

Usage: ``python script.py <search terms>`` (falls back to an interactive prompt).
"""

import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # fixed: the package is ``bs4``, not ``bsa``
from fake_useragent import UserAgent

if __name__ == "__main__":
    # join CLI words with %20; otherwise prompt and URL-quote the input
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    # a random User-Agent makes the request look like a regular browser
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # desktop result markup: first organic hit lives in a div.yuRUbf
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # fallback markup (div.kCrYT) wraps the target in a redirect URL's ``url`` query param
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
75
1
"""Spearman rank-order correlation metric for the 🤗 ``datasets`` metric API."""

from scipy.stats import spearmanr

import datasets


_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned
        if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {\'spearmanr\': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results[\'spearmanr\'])
        -0.7
        >>> print(round(results[\'spearmanr_pvalue\'], 2))
        0.19
'''

_CITATION = R'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    # fixed: both hooks were defined under the same name, which shadowed one another;
    # ``datasets.Metric`` dispatches to ``_info`` and ``_compute`` specifically.

    def _info(self):
        """Describe the metric's inputs and point at the scipy reference."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return the Spearman coefficient (and optionally the p-value)."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
75
"""Demonstrate affine rotations of an image with OpenCV.

Reads ``image_data/lena.jpg`` relative to the repository root, applies three
different affine warps to the grayscale image and plots all four versions.
"""

from pathlib import Path

import cv2  # fixed: the OpenCV module is ``cv2``, not ``cva``
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Warp *img* with the affine transform that maps the three points *pt1* onto *pt2*.

    :param img: source image
    :param pt1: 3x2 float32 array of source points
    :param pt2: 3x2 float32 array of destination points
    :param rows: output width passed to ``warpAffine``
    :param cols: output height passed to ``warpAffine``
    :return: the warped image
    """
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE(review): the obfuscated source collapsed the point-pair arguments;
    # the pairings below follow the upstream demo — confirm if output differs.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
75
1
"""PyTorch OpenAI GPT model tests (tiny-config shape checks plus slow integration tests)."""

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    """Builds a tiny config plus random inputs and runs per-head shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # GPT has no dedicated pad token; use the last vocab id
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, head_mask, token_type_ids, *optional labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        # call with decreasing numbers of optional inputs; last result is shape-checked
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                # the double-heads model expects (batch, num_choices, seq_length) shaped inputs
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        """Greedy generation from 'the president is' must reproduce the pinned ids."""
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
75
'''simple docstring''' from datetime import datetime as dt import os from github import Github UpperCamelCase__ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def a__ ( ) -> List[str]: UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] ) UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' ) UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' ) for issue in open_issues: UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
75
1
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowerCamelCase_ : def __init__( self : Union[str, Any] , _A : List[Any] , _A : Any=13 , _A : Optional[int]=7 , _A : Optional[Any]=False , _A : Tuple=True , _A : Tuple=False , _A : Union[str, Any]=False , _A : Optional[Any]=19 , _A : str=32 , _A : str=5 , _A : Any=4 , _A : Optional[int]=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : List[str]=512 , _A : Union[str, Any]=16 , _A : Union[str, Any]=2 , _A : Dict=0.0_2 , _A : Union[str, Any]=3 , _A : str=4 , _A : Tuple=None , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : List[Any] = use_labels UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : Optional[int] = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Optional[int] = intermediate_size UpperCAmelCase__ : Tuple = hidden_act UpperCAmelCase__ : List[str] = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : List[Any] = max_position_embeddings UpperCAmelCase__ : List[Any] = type_vocab_size UpperCAmelCase__ : Tuple = type_sequence_label_size UpperCAmelCase__ : str = initializer_range UpperCAmelCase__ : List[Any] = num_labels UpperCAmelCase__ : Optional[int] = num_choices 
UpperCAmelCase__ : Optional[Any] = scope def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : int = None UpperCAmelCase__ : int = None UpperCAmelCase__ : Optional[Any] = None if self.use_labels: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : Tuple = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_A , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , ) return config def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : str , _A : Optional[int] , _A : Dict , _A : Optional[Any] , _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = EsmForProteinFolding(config=_A ).float() model.to(_A ) model.eval() UpperCAmelCase__ : Optional[int] = model(_A , attention_mask=_A ) UpperCAmelCase__ : Optional[int] = model(_A ) UpperCAmelCase__ : Any = model(_A ) self.parent.assertEqual(result.positions.shape 
, (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : str = config_and_inputs UpperCAmelCase__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = False lowerCAmelCase__ = (EsmForProteinFolding,) if is_torch_available() else () lowerCAmelCase__ = () lowerCAmelCase__ = {} if is_torch_available() else {} lowerCAmelCase__ = False def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = EsmFoldModelTester(self ) UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) @unittest.skip('''Does not support attention outputs''' ) def lowercase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip def lowercase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip('''Esm does not support embedding resizing''' ) def lowercase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip('''Esm does not support embedding resizing''' ) def lowercase_ ( self : Dict ): '''simple docstring''' pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self : List[Any] ): '''simple 
docstring''' pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self : int ): '''simple docstring''' pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def lowercase_ ( self : Any ): '''simple docstring''' pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def lowercase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip('''ESMFold only has one output format.''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' pass @unittest.skip('''ESMFold does not support input chunking.''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def lowercase_ ( self : Dict ): '''simple docstring''' pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def lowercase_ ( self : int ): '''simple docstring''' pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def lowercase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def lowercase_ ( self : str ): '''simple docstring''' pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def lowercase_ ( self : str ): '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the 
model used for common tests.''' ) def lowercase_ ( self : str ): '''simple docstring''' pass @require_torch class lowerCamelCase_ ( __a ): @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() UpperCAmelCase__ : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCAmelCase__ : Optional[int] = model(_A )['''positions'''] UpperCAmelCase__ : Union[str, Any] = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _A , atol=1e-4 ) )
75
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase_ ( __a ): def __init__( self : Dict , _A : List[str] , _A : int ): '''simple docstring''' super().__init__() self.register_modules(unet=_A , scheduler=_A ) @torch.no_grad() def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCAmelCase__ : List[Any] = int(_A ) if sample_size % down_scale_factor != 0: UpperCAmelCase__ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. 
It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" ''' process.''' ) UpperCAmelCase__ : Dict = int(_A ) UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(_A , _A ) and len(_A ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A ) # set step values self.scheduler.set_timesteps(_A , device=audio.device ) UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample # 2. compute previous image: x_t -> t_t-1 UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCAmelCase__ : Any = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_A )
75
1
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : List[Any] , _A : Tuple=12 , _A : Tuple=7 , _A : str=True , _A : List[Any]=True , _A : Any=True , _A : Optional[Any]=99 , _A : Tuple=32 , _A : Dict=32 , _A : Tuple=2 , _A : Any=4 , _A : Dict=37 , _A : int=0.1 , _A : List[str]=0.1 , _A : Optional[int]=512 , _A : int=0.0_2 , _A : List[Any]=0 , _A : Union[str, Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : Tuple = seq_length UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : Optional[int] = use_input_mask UpperCAmelCase__ : Tuple = use_labels UpperCAmelCase__ : Dict = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = projection_dim UpperCAmelCase__ : Tuple = num_hidden_layers UpperCAmelCase__ : List[Any] = num_attention_heads UpperCAmelCase__ : List[Any] = intermediate_size UpperCAmelCase__ : List[Any] = dropout UpperCAmelCase__ : int = attention_dropout UpperCAmelCase__ : Optional[int] = max_position_embeddings UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[str] = scope UpperCAmelCase__ : Union[str, Any] = bos_token_id def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Tuple = None if self.use_input_mask: 
UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase__ : Optional[int] = input_mask.numpy() UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = input_mask.shape UpperCAmelCase__ : Dict = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_A ): UpperCAmelCase__ : str = 1 UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : Optional[int] = self.get_config() return config, input_ids, tf.convert_to_tensor(_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Dict , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = TFBlipTextModel(config=_A ) UpperCAmelCase__ : Optional[int] = model(_A , attention_mask=_A , training=_A ) UpperCAmelCase__ : List[Any] = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs UpperCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = (TFBlipTextModel,) if is_tf_available() else () 
lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = BlipTextModelTester(self ) UpperCAmelCase__ : str = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' pass def lowercase_ ( self : int ): '''simple docstring''' pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def lowercase_ ( self : str ): '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def lowercase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Any = TFBlipTextModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def lowercase_ ( self : Any , _A : Tuple=True ): '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
75
'''simple docstring''' from math import factorial def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float: if successes > trials: raise ValueError('''successes must be lower or equal to trials''' ) if trials < 0 or successes < 0: raise ValueError('''the function is defined for non-negative integers''' ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''the function is defined for non-negative integers''' ) if not 0 < prob < 1: raise ValueError('''prob has to be in range of 1 - 0''' ) UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) ) coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
75
1
'''simple docstring''' import json import sys def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f: UpperCAmelCase__ : Union[str, Any] = json.load(lowerCAmelCase__ ) UpperCAmelCase__ : str = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' '''] for benchmark_name in sorted(lowerCAmelCase__ ): UpperCAmelCase__ : int = results[benchmark_name] UpperCAmelCase__ : Optional[int] = benchmark_name.split('''/''' )[-1] output_md.append(F"""### Benchmark: {benchmark_file_name}""" ) UpperCAmelCase__ : Optional[Any] = '''| metric |''' UpperCAmelCase__ : str = '''|--------|''' UpperCAmelCase__ : List[Any] = '''| new / old (diff) |''' for metric_name in sorted(lowerCAmelCase__ ): UpperCAmelCase__ : Union[str, Any] = benchmark_res[metric_name] UpperCAmelCase__ : List[Any] = metric_vals['''new'''] UpperCAmelCase__ : int = metric_vals.get('''old''' , lowerCAmelCase__ ) UpperCAmelCase__ : Union[str, Any] = metric_vals.get('''diff''' , lowerCAmelCase__ ) UpperCAmelCase__ : Any = F""" {new_val:f}""" if isinstance(lowerCAmelCase__ , (int, float) ) else '''None''' if old_val is not None: val_str += F""" / {old_val:f}""" if isinstance(lowerCAmelCase__ , (int, float) ) else "None" if dif_val is not None: val_str += F""" ({dif_val:f})""" if isinstance(lowerCAmelCase__ , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('''</details>''' ) with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.writelines('''\n'''.join(lowerCAmelCase__ ) ) if __name__ == "__main__": UpperCamelCase__ = sys.argv[1] UpperCamelCase__ = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
75
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['pixel_values'] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224} UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : List[Any] = size UpperCAmelCase__ : int = resample UpperCAmelCase__ : int = do_center_crop UpperCAmelCase__ : List[str] = crop_size UpperCAmelCase__ : Union[str, Any] = do_rescale UpperCAmelCase__ : Optional[int] = rescale_factor UpperCAmelCase__ : List[Any] = do_normalize UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowercase_ ( self : str , _A : 
np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] ) UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( _A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ): '''simple docstring''' return rescale(_A , scale=_A , data_format=_A , **_A ) def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ): '''simple docstring''' UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : Tuple = size if size is not None else self.size UpperCAmelCase__ : int = 
get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images] if do_resize: UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images] if do_center_crop: UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images] if do_rescale: UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images] if do_normalize: UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images] UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] UpperCAmelCase__ : Dict = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase__ = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
75
'''simple docstring''' import math def a__ ( ) -> None: UpperCAmelCase__ : List[str] = input('''Enter message: ''' ) UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) ) UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ ) elif mode.lower().startswith('''d''' ): UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(F"""Output:\n{text + "|"}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = [''''''] * key for col in range(lowerCAmelCase__ ): UpperCAmelCase__ : Tuple = col while pointer < len(lowerCAmelCase__ ): cipher_text[col] += message[pointer] pointer += key return "".join(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key ) UpperCAmelCase__ : Any = key UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = [''''''] * num_cols UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : List[Any] = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase__ : Optional[int] = 0 row += 1 return "".join(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
75
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class lowerCamelCase_ : # setable values lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None # sigma(t_i) @classmethod def lowercase_ ( cls : Dict ): '''simple docstring''' return cls() @dataclass class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 class lowerCamelCase_ ( __a , __a ): @property def lowercase_ ( self : Tuple ): '''simple docstring''' return True @register_to_config def __init__( self : int , _A : float = 0.0_2 , _A : float = 100 , _A : float = 1.0_0_7 , _A : float = 80 , _A : float = 0.0_5 , _A : float = 50 , ): '''simple docstring''' pass def lowercase_ ( self : List[str] ): '''simple docstring''' return KarrasVeSchedulerState.create() def lowercase_ ( self : int , _A : KarrasVeSchedulerState , _A : int , _A : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Any = jnp.arange(0 , _A )[::-1].copy() UpperCAmelCase__ : Union[str, Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=_A , schedule=jnp.array(_A , dtype=jnp.floataa ) , timesteps=_A , ) def lowercase_ ( self : List[str] , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : random.KeyArray , ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase__ : str = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: UpperCAmelCase__ : Union[str, Any] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase__ : Optional[Any] = random.split(_A , num=1 ) UpperCAmelCase__ : Union[str, Any] = 
self.config.s_noise * random.normal(key=_A , shape=sample.shape ) UpperCAmelCase__ : Dict = sigma + gamma * sigma UpperCAmelCase__ : Union[str, Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def lowercase_ ( self : Dict , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : float , _A : jnp.ndarray , _A : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = sample_hat + sigma_hat * model_output UpperCAmelCase__ : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase__ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A ) def lowercase_ ( self : Dict , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : float , _A : jnp.ndarray , _A : jnp.ndarray , _A : jnp.ndarray , _A : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Dict = sample_prev + sigma_prev * model_output UpperCAmelCase__ : Any = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase__ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A ) def lowercase_ ( self : Optional[int] , _A : KarrasVeSchedulerState , _A : Union[str, Any] , _A : int , _A : Any ): '''simple docstring''' raise NotImplementedError()
75
'''simple docstring''' class lowerCamelCase_ : def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = name UpperCAmelCase__ : Union[str, Any] = val def __str__( self : Tuple ): '''simple docstring''' return f"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self : Union[str, Any] , _A : Dict ): '''simple docstring''' return self.val < other.val class lowerCamelCase_ : def __init__( self : int , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = {} UpperCAmelCase__ : int = {} UpperCAmelCase__ : Any = self.build_heap(_A ) def __getitem__( self : Any , _A : Any ): '''simple docstring''' return self.get_value(_A ) def lowercase_ ( self : Any , _A : List[Any] ): '''simple docstring''' return (idx - 1) // 2 def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ): '''simple docstring''' return idx * 2 + 1 def lowercase_ ( self : Tuple , _A : List[Any] ): '''simple docstring''' return idx * 2 + 2 def lowercase_ ( self : List[str] , _A : Tuple ): '''simple docstring''' return self.heap_dict[key] def lowercase_ ( self : str , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = len(_A ) - 1 UpperCAmelCase__ : Tuple = self.get_parent_idx(_A ) for idx, i in enumerate(_A ): UpperCAmelCase__ : Dict = idx UpperCAmelCase__ : Optional[Any] = i.val for i in range(_A , -1 , -1 ): self.sift_down(_A , _A ) return array def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ): '''simple docstring''' while True: UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741 UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A ) UpperCAmelCase__ : Tuple = idx if l < len(_A ) and array[l] < array[idx]: UpperCAmelCase__ : int = l if r < len(_A ) and array[r] < array[smallest]: UpperCAmelCase__ : Dict = r if smallest != idx: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx] ( ( UpperCAmelCase__ ) , ( 
UpperCAmelCase__ ) , ) : List[str] = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) UpperCAmelCase__ : str = smallest else: break def lowercase_ ( self : List[str] , _A : int ): '''simple docstring''' UpperCAmelCase__ : str = self.get_parent_idx(_A ) while p >= 0 and self.heap[p] > self.heap[idx]: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p] UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) UpperCAmelCase__ : Union[str, Any] = p UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' return self.heap[0] def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0] UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) UpperCAmelCase__ : int = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowercase_ ( self : int , _A : Union[str, Any] ): '''simple docstring''' self.heap.append(_A ) UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1 UpperCAmelCase__ : Optional[Any] = node.val self.sift_up(len(self.heap ) - 1 ) def lowercase_ ( self : str ): '''simple docstring''' return len(self.heap ) == 0 def lowercase_ ( self : int , _A : Optional[Any] , _A : str ): '''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" UpperCAmelCase__ : Optional[Any] = new_value UpperCAmelCase__ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) UpperCamelCase__ = Node('''R''', -1) UpperCamelCase__ = Node('''B''', 6) UpperCamelCase__ = Node('''A''', 3) UpperCamelCase__ = Node('''X''', 1) UpperCamelCase__ = Node('''E''', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array 
UpperCamelCase__ = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('''Min Heap - before decrease key''') for i in my_min_heap.heap: print(i) print('''Min Heap - After decrease key of node [B -> -17]''') my_min_heap.decrease_key(b, -1_7) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
75
1
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter UpperCamelCase__ = True except ImportError: UpperCamelCase__ = False UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name def a__ ( lowerCAmelCase__ ) -> Optional[int]: return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCamelCase_ ( __a ): @staticmethod def lowercase_ ( _A : ArgumentParser ): '''simple docstring''' UpperCAmelCase__ : Any = parser.add_parser('''add-new-model''' ) add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' ) add_new_model_parser.add_argument('''--testing_file''' , type=_A , help='''Configuration file on which to run.''' ) add_new_model_parser.add_argument( '''--path''' , type=_A , help='''Path to cookiecutter. Should only be used for testing purposes.''' ) add_new_model_parser.set_defaults(func=_A ) def __init__( self : int , _A : bool , _A : str , _A : Tuple=None , *_A : int ): '''simple docstring''' UpperCAmelCase__ : Any = testing UpperCAmelCase__ : Any = testing_file UpperCAmelCase__ : List[Any] = path def lowercase_ ( self : Tuple ): '''simple docstring''' warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''' ) if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. 
Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory UpperCAmelCase__ : List[Any] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]] if len(_A ) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. ''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''' ) UpperCAmelCase__ : Dict = ( Path(_A ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) UpperCAmelCase__ : Any = path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(_A ) ) else: with open(self._testing_file , '''r''' ) as configuration_file: UpperCAmelCase__ : Any = json.load(_A ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_A , extra_context=_A , ) UpperCAmelCase__ : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0] # Retrieve configuration with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file: UpperCAmelCase__ : List[str] = json.load(_A ) UpperCAmelCase__ : Any = configuration['''lowercase_modelname'''] UpperCAmelCase__ : Optional[int] = configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(f"""{directory}/configuration.json""" ) UpperCAmelCase__ : Dict = '''PyTorch''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase__ : Union[str, Any] = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase__ : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase__ : Optional[Any] = 
f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}""" os.makedirs(_A , exist_ok=_A ) os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=_A ) # Tests require submodules as they have parent imports with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ): pass shutil.move( f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , ) shutil.move( f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , ) def remove_copy_lines(_A : str ): with open(_A , '''r''' ) as f: UpperCAmelCase__ : int = f.readlines() with open(_A , '''w''' ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(_A ) if output_pytorch: if not self._testing: remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" ) if output_flax: if not 
self._testing: remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , ) shutil.move( f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(_A : str , _A : str , _A : List[str] ): # Create temp file UpperCAmelCase__ , UpperCAmelCase__ : Dict = mkstemp() UpperCAmelCase__ : Tuple = False with fdopen(_A , '''w''' ) as new_file: with open(_A ) as old_file: for line in old_file: new_file.write(_A ) if line_to_copy_below in line: UpperCAmelCase__ : Tuple = True for line_to_copy in lines_to_copy: new_file.write(_A ) if not line_found: raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" ) # Copy the file permissions from the old file to the new file copymode(_A , _A ) # Remove original file remove(_A ) # Move new file move(_A , _A ) def skip_units(_A : Optional[int] ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(_A : Optional[Any] ): with 
open(_A ) as datafile: UpperCAmelCase__ : int = [] UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : Optional[Any] = False for line in datafile: if "# To replace in: " in line and "##" not in line: UpperCAmelCase__ : Dict = line.split('''"''' )[1] UpperCAmelCase__ : Optional[int] = skip_units(_A ) elif "# Below: " in line and "##" not in line: UpperCAmelCase__ : List[str] = line.split('''"''' )[1] UpperCAmelCase__ : int = skip_units(_A ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(_A , _A , _A ) UpperCAmelCase__ : str = [] elif "# Replace with" in line and "##" not in line: UpperCAmelCase__ : str = [] elif "##" not in line: lines_to_copy.append(_A ) remove(_A ) replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" ) os.rmdir(_A )
75
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCamelCase__ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS) UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') UpperCamelCase__ = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def a__ ( lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : str = None # source code of `config_class` UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('''/''' ): UpperCAmelCase__ : List[str] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: UpperCAmelCase__ : Any = ckpt_name break return checkpoint def a__ ( ) -> Dict: UpperCAmelCase__ : Optional[Any] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
75
1
'''simple docstring''' import os from datetime import datetime as dt from github import Github UpperCamelCase__ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def a__ ( ) -> List[Any]: UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] ) UpperCAmelCase__ : List[str] = g.get_repo('''huggingface/diffusers''' ) UpperCAmelCase__ : Optional[Any] = repo.get_issues(state='''open''' ) for issue in open_issues: UpperCAmelCase__ : Union[str, Any] = sorted(issue.get_comments() , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = comments[0] if len(lowerCAmelCase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''' ) issue.remove_from_labels('''stale''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. 
If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) issue.add_to_labels('''stale''' ) if __name__ == "__main__": main()
75
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'torchsde'] def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] )
75
1
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = [ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def a__ ( lowerCAmelCase__ ) -> Optional[Any]: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: UpperCAmelCase__ : str = k.replace(lowerCAmelCase__ , lowerCAmelCase__ ) if k.startswith('''encoder''' ): UpperCAmelCase__ : Any = k.replace('''.attn''' , '''.self_attn''' ) UpperCAmelCase__ : Union[str, Any] = k.replace('''norm1''' , '''self_attn_layer_norm''' ) UpperCAmelCase__ : int = k.replace('''norm2''' , '''final_layer_norm''' ) elif k.startswith('''decoder''' ): UpperCAmelCase__ : Dict = k.replace('''norm1''' , '''self_attn_layer_norm''' ) UpperCAmelCase__ : Dict = k.replace('''norm2''' , '''encoder_attn_layer_norm''' ) UpperCAmelCase__ : str = k.replace('''norm3''' , '''final_layer_norm''' ) return k def a__ ( lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : Optional[Any] = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: UpperCAmelCase__ : str = sd.pop(lowerCAmelCase__ ) UpperCAmelCase__ : int = k.replace('''layernorm_embedding''' , '''layer_norm''' ) assert new_k not in sd UpperCAmelCase__ : Any = v UpperCamelCase__ = ['''START'''] @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: 
UpperCAmelCase__ : Optional[int] = torch.load(lowerCAmelCase__ , map_location='''cpu''' ) UpperCAmelCase__ : Union[str, Any] = model['''model'''] UpperCAmelCase__ : Dict = BlenderbotConfig.from_json_file(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = BlenderbotForConditionalGeneration(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = m.model.state_dict().keys() UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : Union[str, Any] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue UpperCAmelCase__ : Union[str, Any] = rename_state_dict_key(lowerCAmelCase__ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: UpperCAmelCase__ : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowerCAmelCase__ ) m.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) m.half() m.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) UpperCamelCase__ = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
75
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'ctrl' lowerCAmelCase__ = ['past_key_values'] lowerCAmelCase__ = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Any = n_positions UpperCAmelCase__ : Optional[Any] = n_embd UpperCAmelCase__ : List[str] = n_layer UpperCAmelCase__ : Any = n_head UpperCAmelCase__ : int = dff UpperCAmelCase__ : str = resid_pdrop UpperCAmelCase__ : Tuple = embd_pdrop UpperCAmelCase__ : int = layer_norm_epsilon UpperCAmelCase__ : Tuple = initializer_range UpperCAmelCase__ : Union[str, Any] = use_cache super().__init__(**_A )
75
1
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel UpperCamelCase__ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } UpperCamelCase__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = create_model( '''HTSAT-tiny''' , '''roberta''' , lowerCAmelCase__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=lowerCAmelCase__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def a__ ( lowerCAmelCase__ ) -> Optional[Any]: UpperCAmelCase__ : int = {} UpperCAmelCase__ : Any = R'''.*sequential.(\d+).*''' UpperCAmelCase__ : Optional[int] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCAmelCase__ : List[str] = key.replace(lowerCAmelCase__ , lowerCAmelCase__ ) if re.match(lowerCAmelCase__ , lowerCAmelCase__ ): # replace sequential layers with list UpperCAmelCase__ : Any = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) UpperCAmelCase__ : Any = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(lowerCAmelCase__ )//3}.linear.""" ) elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : Union[str, Any] = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) ) # Because in CLAP they use 
`nn.Sequential`... UpperCAmelCase__ : Dict = 1 if projecton_layer == 0 else 2 UpperCAmelCase__ : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value UpperCAmelCase__ : Dict = value UpperCAmelCase__ : List[Any] = mixed_qkv.size(0 ) // 3 UpperCAmelCase__ : str = mixed_qkv[:qkv_dim] UpperCAmelCase__ : Dict = mixed_qkv[qkv_dim : qkv_dim * 2] UpperCAmelCase__ : Union[str, Any] = mixed_qkv[qkv_dim * 2 :] UpperCAmelCase__ : str = query_layer UpperCAmelCase__ : Tuple = key_layer UpperCAmelCase__ : int = value_layer else: UpperCAmelCase__ : List[Any] = value return model_state_dict def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[int]: UpperCAmelCase__ , UpperCAmelCase__ : Dict = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ ) clap_model.eval() UpperCAmelCase__ : Union[str, Any] = clap_model.state_dict() UpperCAmelCase__ : Optional[int] = rename_state_dict(lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = ClapConfig() UpperCAmelCase__ : Tuple = enable_fusion UpperCAmelCase__ : Union[str, Any] = ClapModel(lowerCAmelCase__ ) # ignore the spectrogram embedding layer model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) transformers_config.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') UpperCamelCase__ = 
parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
75
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[str] ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def lowercase_ ( self : List[Any] , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script 
, source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , ) def lowercase_ ( self : Optional[int] , _A : Any ): '''simple docstring''' TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def lowercase_ ( self : Optional[int] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A ) # run training estimator.fit() # result dataframe UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase__ : Any = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
75
1
'''simple docstring''' from random import randint, random def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = 5 , ) -> list: UpperCAmelCase__ : List[str] = [[-1] * number_of_cells] # Create a highway without any car UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : str = max(lowerCAmelCase__ , 0 ) while i < number_of_cells: UpperCAmelCase__ : Dict = ( randint(0 , lowerCAmelCase__ ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> int: UpperCAmelCase__ : Tuple = 0 UpperCAmelCase__ : List[Any] = highway_now[car_index + 1 :] for cell in range(len(lowerCAmelCase__ ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(lowerCAmelCase__ , -1 ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> list: UpperCAmelCase__ : Tuple = len(lowerCAmelCase__ ) # Beforce calculations, the highway is empty UpperCAmelCase__ : Any = [-1] * number_of_cells for car_index in range(lowerCAmelCase__ ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed UpperCAmelCase__ : int = min(highway_now[car_index] + 1 , lowerCAmelCase__ ) # Number of empty cell before the next car UpperCAmelCase__ : Tuple = get_distance(lowerCAmelCase__ , lowerCAmelCase__ ) - 1 # We can't have the car causing an accident UpperCAmelCase__ : str = min(next_highway[car_index] , lowerCAmelCase__ ) if random() < probability: # Randomly, a driver will slow down UpperCAmelCase__ : Tuple = max(next_highway[car_index] - 1 , 0 ) return next_highway def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ ) -> list: UpperCAmelCase__ : List[str] = len(highway[0] ) for i in range(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[int] = update(highway[i] , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = [-1] * number_of_cells for car_index in range(lowerCAmelCase__ ): UpperCAmelCase__ : List[Any] = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) UpperCAmelCase__ : str = (car_index + speed) % number_of_cells # Commit the change of position UpperCAmelCase__ : Optional[int] = speed highway.append(lowerCAmelCase__ ) return highway if __name__ == "__main__": import doctest doctest.testmod()
75
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets UpperCamelCase__ = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' UpperCamelCase__ = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' UpperCamelCase__ = R''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , ) def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 0.0 for i, j in zip(_A , _A ): n_correct += 1.0 if 
math_equivalence.is_equiv(_A , _A ) else 0.0 UpperCAmelCase__ : Dict = n_correct / len(_A ) return { "accuracy": accuracy, }
75
1
'''simple docstring''' import string def a__ ( lowerCAmelCase__ ) -> None: for key in range(len(string.ascii_uppercase ) ): UpperCAmelCase__ : Tuple = '''''' for symbol in message: if symbol in string.ascii_uppercase: UpperCAmelCase__ : Optional[Any] = string.ascii_uppercase.find(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = num - key if num < 0: UpperCAmelCase__ : Optional[Any] = num + len(string.ascii_uppercase ) UpperCAmelCase__ : Optional[int] = translated + string.ascii_uppercase[num] else: UpperCAmelCase__ : Optional[Any] = translated + symbol print(F"""Decryption using Key #{key}: {translated}""" ) def a__ ( ) -> None: UpperCAmelCase__ : Union[str, Any] = input('''Encrypted message: ''' ) UpperCAmelCase__ : List[str] = message.upper() decrypt(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
75
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': 
'''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } UpperCamelCase__ = { '''facebook/bart-base''': 1_0_2_4, '''facebook/bart-large''': 1_0_2_4, '''facebook/bart-large-mnli''': 1_0_2_4, '''facebook/bart-large-cnn''': 1_0_2_4, '''facebook/bart-large-xsum''': 1_0_2_4, '''yjernite/bart_eli5''': 1_0_2_4, } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = BartTokenizer def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ): '''simple docstring''' super().__init__( _A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , ) UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) ) UpperCAmelCase__ 
: Any = add_prefix_space UpperCAmelCase__ : str = pre_tok_class(**_A ) UpperCAmelCase__ : Dict = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase__ : Optional[Any] = '''post_processor''' UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A ) if tokenizer_component_instance: UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] ) if "cls" in state: UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] ) UpperCAmelCase__ : Dict = False if state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase__ : Union[str, Any] = add_prefix_space UpperCAmelCase__ : Dict = True if state.get('''trim_offsets''' , _A ) != trim_offsets: UpperCAmelCase__ : List[Any] = trim_offsets UpperCAmelCase__ : List[Any] = True if changes_to_apply: UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) ) UpperCAmelCase__ : Union[str, Any] = component_class(**_A ) setattr(self.backend_tokenizer , _A , _A ) @property def lowercase_ ( self : Dict ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value UpperCAmelCase__ : str = value def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) 
return super()._batch_encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*_A , **_A ) def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A ) return tuple(_A ) def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ): '''simple docstring''' UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] UpperCAmelCase__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
75
1
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( __a ): lowerCAmelCase__ = (DDPMScheduler,) def lowercase_ ( self : Optional[int] , **_A : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**_A ) return config def lowercase_ ( self : str ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=_A ) def lowercase_ ( self : Any ): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def lowercase_ ( self : Dict ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def lowercase_ ( self : Tuple ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def lowercase_ ( self : Any ): '''simple docstring''' self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def lowercase_ ( self : Any ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def lowercase_ ( self : Any ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.scheduler_classes[0] 
UpperCAmelCase__ : int = self.get_scheduler_config() UpperCAmelCase__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase__ : str = self.get_scheduler_config() UpperCAmelCase__ : Any = scheduler_class(**_A ) UpperCAmelCase__ : List[str] = len(_A ) UpperCAmelCase__ : Tuple = self.dummy_model() UpperCAmelCase__ : List[str] = self.dummy_sample_deter UpperCAmelCase__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual UpperCAmelCase__ : int = model(_A , _A ) # 2. predict previous mean of sample x_t-1 UpperCAmelCase__ : List[Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCAmelCase__ : Dict = pred_prev_sample UpperCAmelCase__ : List[Any] = torch.sum(torch.abs(_A ) ) UpperCAmelCase__ : List[Any] = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.scheduler_classes[0] UpperCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase__ : Tuple = scheduler_class(**_A ) UpperCAmelCase__ : Optional[Any] = len(_A ) UpperCAmelCase__ : Any = self.dummy_model() UpperCAmelCase__ : Union[str, Any] = self.dummy_sample_deter UpperCAmelCase__ : Tuple = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual UpperCAmelCase__ : int = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 UpperCAmelCase__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCAmelCase__ : Dict = pred_prev_sample UpperCAmelCase__ : Optional[Any] = torch.sum(torch.abs(_A ) ) UpperCAmelCase__ : List[str] = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase__ : Dict = self.get_scheduler_config() UpperCAmelCase__ : Dict = scheduler_class(**_A ) UpperCAmelCase__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) UpperCAmelCase__ : Tuple = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: UpperCAmelCase__ : Optional[int] = -1 else: UpperCAmelCase__ : Union[str, Any] = timesteps[i + 1] UpperCAmelCase__ : str = scheduler.previous_timestep(_A ) UpperCAmelCase__ : List[Any] = prev_t.item() self.assertEqual(_A , _A ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.scheduler_classes[0] UpperCAmelCase__ : Any = self.get_scheduler_config() UpperCAmelCase__ : Dict = scheduler_class(**_A ) UpperCAmelCase__ : int = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.scheduler_classes[0] UpperCAmelCase__ : Optional[Any] = self.get_scheduler_config() UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_A ) UpperCAmelCase__ : Dict = [100, 87, 50, 1, 0] UpperCAmelCase__ : List[Any] = len(_A ) with self.assertRaises(_A , msg='''Can only pass one of `num_inference_steps` or 
`custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.scheduler_classes[0] UpperCAmelCase__ : List[str] = self.get_scheduler_config() UpperCAmelCase__ : List[str] = scheduler_class(**_A ) UpperCAmelCase__ : Dict = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_A )
75
'''simple docstring''' import random from typing import Any def a__ ( lowerCAmelCase__ ) -> list[Any]: for _ in range(len(lowerCAmelCase__ ) ): UpperCAmelCase__ : int = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ : Optional[int] = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = data[b], data[a] return data if __name__ == "__main__": UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7] UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
75
1
'''simple docstring''' def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float: return round(float(moles / volume ) * nfactor ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float: return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float: return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float: return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
75
'''simple docstring''' import math def a__ ( lowerCAmelCase__ ) -> list[int]: UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Optional[Any] = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment UpperCAmelCase__ : str = [True] * (end + 1) UpperCAmelCase__ : Any = [] while start <= end: if temp[start] is True: in_prime.append(lowerCAmelCase__ ) for i in range(start * start , end + 1 , lowerCAmelCase__ ): UpperCAmelCase__ : Dict = False start += 1 prime += in_prime UpperCAmelCase__ : Optional[int] = end + 1 UpperCAmelCase__ : str = min(2 * end , lowerCAmelCase__ ) while low <= n: UpperCAmelCase__ : List[str] = [True] * (high - low + 1) for each in in_prime: UpperCAmelCase__ : List[str] = math.floor(low / each ) * each if t < low: t += each for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ): UpperCAmelCase__ : Union[str, Any] = False for j in range(len(lowerCAmelCase__ ) ): if temp[j] is True: prime.append(j + low ) UpperCAmelCase__ : Union[str, Any] = high + 1 UpperCAmelCase__ : str = min(high + end , lowerCAmelCase__ ) return prime print(sieve(1_0**6))
75
1
'''simple docstring''' UpperCamelCase__ = 9.80_665 def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = g ) -> float: if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
75
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ): lowerCAmelCase__ = StableDiffusionInpaintPipeline lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowerCAmelCase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCAmelCase__ = frozenset([] ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A ) torch.manual_seed(0 ) UpperCAmelCase__ : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = CLIPTextConfig( 
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , ) UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A ) UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase__ : str = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowercase_ ( self : str , _A : Dict , _A : Any=0 ): '''simple docstring''' UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) ) UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) ) if str(_A ).startswith('''mps''' ): UpperCAmelCase__ : List[Any] = torch.manual_seed(_A ) else: UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase__ : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': init_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : Tuple = self.get_dummy_components() UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A ) UpperCAmelCase__ : List[str] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A ) UpperCAmelCase__ : Any = sd_pipe(**_A ).images 
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : Tuple ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) UpperCAmelCase__ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) UpperCAmelCase__ : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting''' UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench''' UpperCAmelCase__ : str = torch.manual_seed(0 ) UpperCAmelCase__ : str = pipe( prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9e-3 def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) UpperCAmelCase__ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) UpperCAmelCase__ : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting''' UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained( _A , torch_dtype=torch.floataa , safety_checker=_A , ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench''' UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = pipe( prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def lowercase_ ( self : Any ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase__ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) UpperCAmelCase__ : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting''' UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' ) UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained( _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , ) pipe.to(_A ) 
pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench''' UpperCAmelCase__ : Tuple = torch.manual_seed(0 ) UpperCAmelCase__ : Any = pipe( prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) UpperCAmelCase__ : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.6_5 * 10**9
75
1
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


UpperCamelCase__ = logging.get_logger(__name__)


class lowerCamelCase_(BaseImageProcessor):
    """Image processor that rounds each image's dimensions down to the
    nearest multiple of ``size_divisor`` and rescales pixel values to [0, 1].

    The obfuscated original had duplicate ``_A`` parameter names (a
    SyntaxError), three methods all named ``lowercase_`` (the last shadowing
    the others while ``preprocess`` called ``self.resize``/``self.rescale``),
    and an undefined base class ``__a``; this reconstruction restores the
    conventional names so the visible call sites resolve.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        """
        Args:
            do_resize: Whether to round height/width down to a multiple of
                ``size_divisor``.
            size_divisor: Divisor the output dimensions must be a multiple of.
            resample: PIL resampling filter used when resizing.
            do_rescale: Whether to rescale pixel values by 1/255.
        """
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` so both dimensions are multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here resolves to the module-level function imported above,
        # not this method (methods are not in the enclosing scope).
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Prepare one image or a batch of images for the model.

        Returns:
            ``BatchFeature`` holding ``pixel_values``.
        Raises:
            ValueError: if resizing is requested without a ``size_divisor``,
                or if the input is not a valid image (batch).
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('''Invalid image(s)''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase__ = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple: if attention_mask is None: UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , 
_A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : str = is_training UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : Union[str, Any] = max_position_embeddings UpperCAmelCase__ : int = eos_token_id UpperCAmelCase__ : Optional[int] = pad_token_id UpperCAmelCase__ : List[str] = bos_token_id UpperCAmelCase__ : Union[str, Any] = initializer_range def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , ) UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A ) return config, inputs_dict def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = 20 UpperCAmelCase__ : int = model_class_name(_A ) UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : str = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Tuple = model.decode( decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , ) UpperCAmelCase__ : int = model.decode(_A , _A ) UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ): '''simple docstring''' 
UpperCAmelCase__ : Tuple = 20 UpperCAmelCase__ : Optional[int] = model_class_name(_A ) UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : int = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Any = model.decode( decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A ) UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class lowerCamelCase_ ( unittest.TestCase ): lowerCAmelCase__ = 9_9 def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 
2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) UpperCAmelCase__ : int = input_ids.shape[0] UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data() UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A ) UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A ) UpperCAmelCase__ : int = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum() UpperCAmelCase__ : Dict = np.equal(_A , 1 
).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_A , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase_ ( __a , unittest.TestCase , __a ): lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_A , _A , _A ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A ) UpperCAmelCase__ : str = model_class(_A ) @jax.jit def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ): return model.encode(input_ids=_A , attention_mask=_A ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def 
lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : List[str] = model_class(_A ) UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) UpperCAmelCase__ : Tuple = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ): return model.decode( decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self : List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id UpperCAmelCase__ : Union[str, Any] = model(_A ) self.assertIsNotNone(_A ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} UpperCAmelCase__ : str = 
FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A ) UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) UpperCAmelCase__ : Optional[Any] = ['''Sam'''] UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' ) UpperCAmelCase__ : List[str] = model.generate(**_A , **_A ) UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.''' UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A ) assert generated_txt[0].strip() == tgt_text
75
1
'''simple docstring'''
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Return True if the two strings are anagrams of each other,
    ignoring case and all whitespace.

    >>> check_anagrams("Silent", "Listen")
    True
    >>> check_anagrams("This is a string", "Is this a string")
    True
    >>> check_anagrams("There", "Their")
    False
    """
    # Normalize case and trim surrounding whitespace.
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character, increment the count for the first string and
    # decrement for the second; true anagrams cancel out to all zeros.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


# Backward-compatible alias for the obfuscated name this dump used.
a__ = check_anagrams


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()

    status = check_anagrams(input_a, input_b)
    print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
75
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCamelCase_ ( datasets.BeamBasedBuilder ): def lowercase_ ( self : str ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , ) def lowercase_ ( self : int , _A : Optional[int] , _A : Optional[Any] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )] def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_A ) class lowerCamelCase_ ( datasets.BeamBasedBuilder ): def lowercase_ ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , ) def lowercase_ ( self : Any , _A : List[str] , _A : Any ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} ) ] def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Tuple ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_A ) def a__ ( ) -> Tuple: return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] def a__ ( ) -> Optional[Any]: return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] class lowerCamelCase_ ( __a ): @require_beam def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Any = 
DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) UpperCAmelCase__ : Union[str, Any] = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A ) self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def lowercase_ ( self : Any ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : List[str] = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : int = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[int] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock: UpperCAmelCase__ : Dict = partial(_A , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertTrue( os.path.exists( os.path.join( _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples 
, _A ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def lowercase_ ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Tuple = DummyBeamDataset(cache_dir=_A ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : int = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) ) UpperCAmelCase__ : Optional[int] = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A ) self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset
75
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


UpperCamelCase__ = logging.get_logger(__name__)


class lowerCamelCase_(SegformerImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Use ``SegformerImageProcessor`` instead; this shim will be removed in
    Transformers v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        # The obfuscated original declared `*_A, **_A` (duplicate parameter
        # name — a SyntaxError) and passed the args tuple where the warning
        # category belongs; use distinct names and an explicit FutureWarning.
        # The undefined base `__a` is replaced by the imported processor,
        # matching the standard transformers deprecation-shim pattern.
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
75
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


# Help text for the `accelerate config` sub-command.
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''

# Backward-compatible alias for the obfuscated constant name in this dump.
UpperCamelCase__ = description


def get_user_input():
    """Interactively gather a config, dispatching on the compute environment."""
    compute_environment = _ask_options(
        '''In which compute environment are you running?''',
        ['''This machine''', '''AWS (Amazon SageMaker)'''],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the argparse parser for `config`, attaching it to *subparsers* if given.

    The obfuscated original named every function ``a__`` and referenced the
    undefined ``subparsers`` / set ``func`` to the wrong object; this restores
    accelerate's original wiring.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('''config''', description=description)
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''', description=description)

    parser.add_argument(
        '''--config_file''',
        default=None,
        help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have '''
            '''such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with 'huggingface'.'''
        ),
    )

    if subparsers is not None:
        # Dispatch to the handler below when this sub-command is selected.
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the interactive prompts and write the resulting config file."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Ensure the cache directory exists before writing the default file.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('''.json'''):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F"""accelerate configuration saved at {config_file}""")


def main():
    """CLI entry point: parse arguments and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
75
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['input_features', 'attention_mask'] def __init__( self : Any , _A : Tuple=80 , _A : Optional[int]=16_000 , _A : Optional[Any]=80 , _A : List[Any]=0.0 , _A : List[Any]=True , _A : List[str]=True , _A : Any=True , **_A : Tuple , ): '''simple docstring''' super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A ) UpperCAmelCase__ : int = num_mel_bins UpperCAmelCase__ : Optional[Any] = do_ceptral_normalize UpperCAmelCase__ : List[str] = normalize_means UpperCAmelCase__ : Optional[Any] = normalize_vars UpperCAmelCase__ : int = True def lowercase_ ( self : int , _A : np.ndarray , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers UpperCAmelCase__ : Optional[int] = torch.from_numpy(_A ).unsqueeze(0 ) UpperCAmelCase__ : Union[str, Any] = ta_kaldi.fbank(_A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def lowercase_ ( _A : np.ndarray , _A : int , _A : Optional[bool] = True , _A : Optional[bool] = True , _A : float = 0.0 , ): '''simple docstring''' if normalize_means: UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 ) UpperCAmelCase__ : Dict = np.subtract(_A , _A ) if normalize_vars: UpperCAmelCase__ : Union[str, Any] = x[:input_length].std(axis=0 ) UpperCAmelCase__ : str = np.divide(_A , _A ) if input_length < x.shape[0]: UpperCAmelCase__ : str = padding_value # make sure array is in float32 UpperCAmelCase__ : int = x.astype(np.floataa ) return x def lowercase_ ( self : str , _A : 
List[np.ndarray] , _A : Optional[np.ndarray] = None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_A , _A , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(_A , _A ) ] def __call__( self : Optional[int] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : Any , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) UpperCAmelCase__ : Any = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) UpperCAmelCase__ : Tuple = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : Any = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): UpperCAmelCase__ : Any = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : List[str] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [raw_speech] # extract fbank features UpperCAmelCase__ : Optional[Any] = [self._extract_fbank_features(_A ) for waveform in raw_speech] # convert into correct format for padding UpperCAmelCase__ : List[Any] = BatchFeature({'''input_features''': features} ) UpperCAmelCase__ : int = self.pad( _A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , ) # make sure list is in array format UpperCAmelCase__ : List[Any] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , _A ): UpperCAmelCase__ : Optional[int] = [np.asarray(_A , dtype=np.floataa ) for feature in input_features] UpperCAmelCase__ : List[Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: UpperCAmelCase__ : Dict = [np.asarray(_A , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: UpperCAmelCase__ : List[str] = ( np.array(_A , dtype=np.intaa ) if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD else None ) 
UpperCAmelCase__ : str = self.normalize( padded_inputs['''input_features'''] , attention_mask=_A ) if return_tensors is not None: UpperCAmelCase__ : Optional[Any] = padded_inputs.convert_to_tensors(_A ) return padded_inputs
75
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: # Construct model if gpta_config_file == "": UpperCAmelCase__ : Optional[Any] = GPTaConfig() else: UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ ) # Load weights from numpy load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowerCAmelCase__ ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) UpperCamelCase__ = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
75
1
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 4_00 * 2**20, 6_00 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 1_00 * 2**20, 9_00 * 2**20] ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , lowerCAmelCase__ ) UpperCAmelCase__ : Any = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: UpperCAmelCase__ : Any = dataset_size < in_memory_max_size else: UpperCAmelCase__ : List[Any] = False UpperCAmelCase__ : Dict = is_small_dataset(lowerCAmelCase__ ) assert result == expected
75
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase_ : def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : str = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : List[str] = use_input_mask UpperCAmelCase__ : Tuple = use_token_type_ids 
UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Any = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : List[str] = coordinate_size UpperCAmelCase__ : Tuple = shape_size UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[Any] = num_choices UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase__ : str = text_seq_length UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1 UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCAmelCase__ : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase__ : str = bbox[i, j, 3] UpperCAmelCase__ : Dict = bbox[i, j, 1] UpperCAmelCase__ : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase__ : Optional[int] = bbox[i, j, 2] UpperCAmelCase__ : Any = bbox[i, j, 0] UpperCAmelCase__ : List[Any] = tmp_coordinate UpperCAmelCase__ : str = tf.constant(_A ) UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 
self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Any = None if self.use_input_mask: UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase__ : Any = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : List[str] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A ) # text + image UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A ) UpperCAmelCase__ : Tuple = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , ) UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) # text only UpperCAmelCase__ : Any = model(_A , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.num_labels UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A ) UpperCAmelCase__ : Union[str, Any] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.num_labels UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A ) UpperCAmelCase__ : Optional[int] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : str = 2 UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A ) UpperCAmelCase__ : str = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs UpperCAmelCase__ : List[Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ): '''simple docstring''' return True def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(_A ) if model_class in get_values(_A ): UpperCAmelCase__ : Tuple = { k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_A , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
elif model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = model_class(_A ) if getattr(_A , '''hf_compute_loss''' , _A ): # The number of elements in the loss should be the same as the number of elements in the label UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : List[Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0] ] UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) UpperCAmelCase__ : List[Any] = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy() if 
len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCAmelCase__ : Any = -100 UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A ) UpperCAmelCase__ : int = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Dict = model(_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) # Get keys that were added with the _prepare_for_class function UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys() UpperCAmelCase__ : int = inspect.signature(model.call ).parameters UpperCAmelCase__ : Union[str, Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCAmelCase__ : Dict = {0: '''input_ids'''} for label_key in label_keys: UpperCAmelCase__ : str = signature_names.index(_A ) UpperCAmelCase__ : List[Any] = label_key UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCAmelCase__ : Tuple = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCAmelCase__ : Any = prepared_for_class[value] UpperCAmelCase__ : Tuple = tuple(_A ) # Send to model UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase_ ( self : int ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ 
) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Union[str, Any] = type self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : List[str] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _A , _A , _A , _A , _A , _A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple 
docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> List[str]: UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Dict ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) UpperCAmelCase__ : Dict = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values UpperCAmelCase__ : str = tf.constant([[1, 2]] ) UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A ) # verify the logits UpperCAmelCase__ : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , _A ) UpperCAmelCase__ : Dict = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
75
1
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : def __init__( self : Tuple , _A : Any , _A : List[str]=13 , _A : Optional[int]=[30, 30] , _A : List[str]=2 , _A : Union[str, Any]=3 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Tuple=32 , _A : Optional[Any]=5 , _A : List[Any]=4 , _A : Any=37 , _A : List[str]="gelu" , _A : Tuple=0.1 , _A : str=0.1 , _A : Tuple=10 , _A : List[Any]=0.0_2 , _A : Any=3 , _A : Optional[int]=None , _A : Tuple=8 , _A : Optional[Any]=10 , ): '''simple docstring''' UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : Dict = batch_size UpperCAmelCase__ : str = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : Optional[int] = use_labels UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : Optional[Any] = num_hidden_layers UpperCAmelCase__ : Dict = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : List[str] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[Any] = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : str = num_labels UpperCAmelCase__ : 
List[str] = scope UpperCAmelCase__ : Union[str, Any] = n_targets UpperCAmelCase__ : int = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens UpperCAmelCase__ : Any = (image_size[1] // patch_size) * (image_size[0] // patch_size) UpperCAmelCase__ : Tuple = num_patches + 1 + self.num_detection_tokens def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) UpperCAmelCase__ : Optional[int] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) UpperCAmelCase__ : List[Any] = [] for i in range(self.batch_size ): UpperCAmelCase__ : str = {} UpperCAmelCase__ : Optional[Any] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_A ) UpperCAmelCase__ : Union[str, Any] = torch.rand(self.n_targets , 4 , device=_A ) labels.append(_A ) UpperCAmelCase__ : List[Any] = self.get_config() return config, pixel_values, labels def lowercase_ ( self : Tuple ): '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def lowercase_ ( self : Union[str, Any] , _A : int , _A : List[str] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = YolosModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : List[Any] = model(_A ) 
self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Dict , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = YolosForObjectDetection(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Optional[Any] = model(pixel_values=_A ) UpperCAmelCase__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) UpperCAmelCase__ : Dict = model(pixel_values=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCAmelCase__ = ( {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : Any=False ): '''simple docstring''' UpperCAmelCase__ : Tuple = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": UpperCAmelCase__ : int = [] for i in range(self.model_tester.batch_size ): UpperCAmelCase__ : 
str = {} UpperCAmelCase__ : str = torch.ones( size=(self.model_tester.n_targets,) , device=_A , dtype=torch.long ) UpperCAmelCase__ : str = torch.ones( self.model_tester.n_targets , 4 , device=_A , dtype=torch.float ) labels.append(_A ) UpperCAmelCase__ : str = labels return inputs_dict def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = YolosModelTester(self ) UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' pass def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Any = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(_A ) UpperCAmelCase__ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[str] = [*signature.parameters.keys()] UpperCAmelCase__ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : List[Any] = True # in YOLOS, the seq_len is different UpperCAmelCase__ : Union[str, Any] = self.model_tester.expected_seq_len for model_class in self.all_model_classes: UpperCAmelCase__ : Any = True UpperCAmelCase__ : Any = False UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : Union[str, Any] = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase__ : Any = outputs.attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase__ : Union[str, Any] = True UpperCAmelCase__ : int = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Any = model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase__ : str = outputs.attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) UpperCAmelCase__ : str = len(_A ) # Check attention is always last and order is fine UpperCAmelCase__ : Optional[int] = True UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : Optional[int] = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase__ : Any = 1 self.assertEqual(out_len + added_hidden_states , len(_A ) ) UpperCAmelCase__ : List[str] = outputs.attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' def check_hidden_states_output(_A : Optional[int] , _A : int , _A : List[str] ): UpperCAmelCase__ : Union[str, Any] = 
model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase__ : str = model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase__ : Optional[int] = outputs.hidden_states UpperCAmelCase__ : List[str] = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) # YOLOS has a different seq_length UpperCAmelCase__ : Optional[Any] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : Any = True check_hidden_states_output(_A , _A , _A ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_A ) @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : str = YolosModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> Dict: UpperCAmelCase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(_A ) UpperCAmelCase__ : Optional[Any] = self.default_image_processor 
UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(inputs.pixel_values ) # verify outputs UpperCAmelCase__ : Any = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase__ : Dict = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_A , ) UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1e-4 ) ) # verify postprocessing UpperCAmelCase__ : Any = image_processor.post_process_object_detection( _A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] UpperCAmelCase__ : str = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_A ) UpperCAmelCase__ : Any = [75, 75, 17, 63, 17] UpperCAmelCase__ : int = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_A ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , _A , atol=1e-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , _A ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , _A ) )
75
'''simple docstring''' import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100""" UpperCamelCase__ = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: UpperCamelCase__ = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: UpperCamelCase__ = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
75
1
'''simple docstring''' def a__ ( lowerCAmelCase__ ) -> list: if len(lowerCAmelCase__ ) <= 1: return [tuple(lowerCAmelCase__ )] UpperCAmelCase__ : int = [] def generate(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : int = [0] * n res.append(tuple(lowerCAmelCase__ ) ) UpperCAmelCase__ : Any = 0 while i < n: if c[i] < i: if i % 2 == 0: UpperCAmelCase__ , UpperCAmelCase__ : str = arr[i], arr[0] else: UpperCAmelCase__ , UpperCAmelCase__ : int = arr[i], arr[c[i]] res.append(tuple(lowerCAmelCase__ ) ) c[i] += 1 UpperCAmelCase__ : List[Any] = 0 else: UpperCAmelCase__ : Optional[Any] = 0 i += 1 generate(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) return res if __name__ == "__main__": UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() UpperCamelCase__ = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
75
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray: UpperCAmelCase__ : List[str] = cva.getAffineTransform(lowerCAmelCase__ , lowerCAmelCase__ ) return cva.warpAffine(lowerCAmelCase__ , lowerCAmelCase__ , (rows, cols) ) if __name__ == "__main__": # read original image UpperCamelCase__ = cva.imread( str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''') ) # turn image in gray scale value UpperCamelCase__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape UpperCamelCase__ , UpperCamelCase__ = gray_img.shape # set different points to rotate image UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa) UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa) UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa) UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa) # add all rotated images in a list UpperCamelCase__ = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations UpperCamelCase__ = plt.figure(1) UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3'''] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''') plt.title(titles[i]) plt.axis('''off''') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
75
1
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowerCamelCase_ ( __a ): def __init__( self : Optional[int] , _A : Union[str, "sqlalchemy.sql.Selectable"] , _A : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _A : Optional[Features] = None , _A : str = None , _A : bool = False , **_A : Dict , ): '''simple docstring''' super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A ) UpperCAmelCase__ : int = Sql( cache_dir=_A , features=_A , sql=_A , con=_A , **_A , ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = None UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : List[str] = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , ) # Build dataset for splits UpperCAmelCase__ : Optional[int] = self.builder.as_dataset( split='''train''' , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCamelCase_ : def __init__( self : Any , _A : Dataset , _A : str , _A : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : Tuple , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) UpperCAmelCase__ : Any = dataset UpperCAmelCase__ : int = name UpperCAmelCase__ : Union[str, Any] = con UpperCAmelCase__ : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE UpperCAmelCase__ : int = num_proc UpperCAmelCase__ : Optional[int] = to_sql_kwargs def lowercase_ ( 
self : int ): '''simple docstring''' UpperCAmelCase__ : int = self.to_sql_kwargs.pop('''sql''' , _A ) UpperCAmelCase__ : int = self.to_sql_kwargs.pop('''con''' , _A ) UpperCAmelCase__ : List[Any] = self.to_sql_kwargs.pop('''index''' , _A ) UpperCAmelCase__ : Optional[int] = self._write(index=_A , **self.to_sql_kwargs ) return written def lowercase_ ( self : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = args UpperCAmelCase__ : int = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs UpperCAmelCase__ : Union[str, Any] = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) UpperCAmelCase__ : Tuple = batch.to_pandas() UpperCAmelCase__ : Tuple = df.to_sql(self.name , self.con , index=_A , **_A ) return num_rows or len(_A ) def lowercase_ ( self : Optional[Any] , _A : Optional[int] , **_A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
75
'''simple docstring''' from datetime import datetime as dt import os from github import Github UpperCamelCase__ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def a__ ( ) -> List[str]: UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] ) UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' ) UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' ) for issue in open_issues: UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
75
1
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets UpperCamelCase__ = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' UpperCamelCase__ = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' UpperCamelCase__ = R''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : Dict ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , ) def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 0.0 for i, j in zip(_A , _A ): n_correct += 1.0 if 
math_equivalence.is_equiv(_A , _A ) else 0.0 UpperCAmelCase__ : Dict = n_correct / len(_A ) return { "accuracy": accuracy, }
75
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase_ ( __a ): def __init__( self : Dict , _A : List[str] , _A : int ): '''simple docstring''' super().__init__() self.register_modules(unet=_A , scheduler=_A ) @torch.no_grad() def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCAmelCase__ : List[Any] = int(_A ) if sample_size % down_scale_factor != 0: UpperCAmelCase__ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. 
It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" ''' process.''' ) UpperCAmelCase__ : Dict = int(_A ) UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(_A , _A ) and len(_A ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A ) # set step values self.scheduler.set_timesteps(_A , device=audio.device ) UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample # 2. compute previous image: x_t -> t_t-1 UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCAmelCase__ : Any = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_A )
75
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['image_processor', 'tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : int , _A : Tuple=None , _A : Optional[Any]=None , **_A : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _A , ) UpperCAmelCase__ : Optional[Any] = kwargs.pop('''feature_extractor''' ) UpperCAmelCase__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_A , _A ) def __call__( self : Optional[Any] , _A : str=None , _A : Union[str, Any]=None , _A : int=None , _A : Tuple=None , **_A : int ): '''simple docstring''' if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. 
Either text or visual prompt.''' ) if text is not None: UpperCAmelCase__ : List[str] = self.tokenizer(_A , return_tensors=_A , **_A ) if visual_prompt is not None: UpperCAmelCase__ : Any = self.image_processor(_A , return_tensors=_A , **_A ) if images is not None: UpperCAmelCase__ : List[Any] = self.image_processor(_A , return_tensors=_A , **_A ) if visual_prompt is not None and images is not None: UpperCAmelCase__ : Optional[Any] = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: UpperCAmelCase__ : Optional[Any] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: UpperCAmelCase__ : List[str] = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_A ) , tensor_type=_A ) def lowercase_ ( self : Optional[Any] , *_A : List[Any] , **_A : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*_A , **_A ) def lowercase_ ( self : Tuple , *_A : List[Any] , **_A : List[str] ): '''simple docstring''' return self.tokenizer.decode(*_A , **_A ) @property def lowercase_ ( self : Tuple ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , ) return self.image_processor_class @property def lowercase_ ( self : Dict ): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _A , ) return self.image_processor
75
'''simple docstring''' from math import factorial def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float: if successes > trials: raise ValueError('''successes must be lower or equal to trials''' ) if trials < 0 or successes < 0: raise ValueError('''the function is defined for non-negative integers''' ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''the function is defined for non-negative integers''' ) if not 0 < prob < 1: raise ValueError('''prob has to be in range of 1 - 0''' ) UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) ) coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
75
1
'''simple docstring''' import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
75
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['pixel_values'] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224} UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : List[Any] = size UpperCAmelCase__ : int = resample UpperCAmelCase__ : int = do_center_crop UpperCAmelCase__ : List[str] = crop_size UpperCAmelCase__ : Union[str, Any] = do_rescale UpperCAmelCase__ : Optional[int] = rescale_factor UpperCAmelCase__ : List[Any] = do_normalize UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowercase_ ( self : str , _A : 
np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] ) UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( _A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ): '''simple docstring''' return rescale(_A , scale=_A , data_format=_A , **_A ) def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ): '''simple docstring''' UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : Tuple = size if size is not None else self.size UpperCAmelCase__ : int = 
get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images] if do_resize: UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images] if do_center_crop: UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images] if do_rescale: UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images] if do_normalize: UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images] UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] UpperCAmelCase__ : Dict = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
75
1
'''simple docstring''' from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def a__ ( lowerCAmelCase__ = True , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int: if not is_tqdm_available(): raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' ) UpperCAmelCase__ : Union[str, Any] = False if main_process_only: UpperCAmelCase__ : List[str] = PartialState().local_process_index == 0 return _tqdm(*lowerCAmelCase__ , **lowerCAmelCase__ , disable=lowerCAmelCase__ )
75
'''simple docstring''' import math def a__ ( ) -> None: UpperCAmelCase__ : List[str] = input('''Enter message: ''' ) UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) ) UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ ) elif mode.lower().startswith('''d''' ): UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(F"""Output:\n{text + "|"}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = [''''''] * key for col in range(lowerCAmelCase__ ): UpperCAmelCase__ : Tuple = col while pointer < len(lowerCAmelCase__ ): cipher_text[col] += message[pointer] pointer += key return "".join(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key ) UpperCAmelCase__ : Any = key UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = [''''''] * num_cols UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : List[Any] = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase__ : Optional[int] = 0 row += 1 return "".join(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SEWForCTC''', '''SEWForSequenceClassification''', '''SEWModel''', '''SEWPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
'''simple docstring''' class lowerCamelCase_ : def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = name UpperCAmelCase__ : Union[str, Any] = val def __str__( self : Tuple ): '''simple docstring''' return f"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self : Union[str, Any] , _A : Dict ): '''simple docstring''' return self.val < other.val class lowerCamelCase_ : def __init__( self : int , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = {} UpperCAmelCase__ : int = {} UpperCAmelCase__ : Any = self.build_heap(_A ) def __getitem__( self : Any , _A : Any ): '''simple docstring''' return self.get_value(_A ) def lowercase_ ( self : Any , _A : List[Any] ): '''simple docstring''' return (idx - 1) // 2 def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ): '''simple docstring''' return idx * 2 + 1 def lowercase_ ( self : Tuple , _A : List[Any] ): '''simple docstring''' return idx * 2 + 2 def lowercase_ ( self : List[str] , _A : Tuple ): '''simple docstring''' return self.heap_dict[key] def lowercase_ ( self : str , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = len(_A ) - 1 UpperCAmelCase__ : Tuple = self.get_parent_idx(_A ) for idx, i in enumerate(_A ): UpperCAmelCase__ : Dict = idx UpperCAmelCase__ : Optional[Any] = i.val for i in range(_A , -1 , -1 ): self.sift_down(_A , _A ) return array def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ): '''simple docstring''' while True: UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741 UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A ) UpperCAmelCase__ : Tuple = idx if l < len(_A ) and array[l] < array[idx]: UpperCAmelCase__ : int = l if r < len(_A ) and array[r] < array[smallest]: UpperCAmelCase__ : Dict = r if smallest != idx: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx] ( ( UpperCAmelCase__ ) , ( 
UpperCAmelCase__ ) , ) : List[str] = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) UpperCAmelCase__ : str = smallest else: break def lowercase_ ( self : List[str] , _A : int ): '''simple docstring''' UpperCAmelCase__ : str = self.get_parent_idx(_A ) while p >= 0 and self.heap[p] > self.heap[idx]: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p] UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) UpperCAmelCase__ : Union[str, Any] = p UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' return self.heap[0] def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0] UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) UpperCAmelCase__ : int = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowercase_ ( self : int , _A : Union[str, Any] ): '''simple docstring''' self.heap.append(_A ) UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1 UpperCAmelCase__ : Optional[Any] = node.val self.sift_up(len(self.heap ) - 1 ) def lowercase_ ( self : str ): '''simple docstring''' return len(self.heap ) == 0 def lowercase_ ( self : int , _A : Optional[Any] , _A : str ): '''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" UpperCAmelCase__ : Optional[Any] = new_value UpperCAmelCase__ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) UpperCamelCase__ = Node('''R''', -1) UpperCamelCase__ = Node('''B''', 6) UpperCamelCase__ = Node('''A''', 3) UpperCamelCase__ = Node('''X''', 1) UpperCamelCase__ = Node('''E''', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array 
UpperCamelCase__ = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('''Min Heap - before decrease key''') for i in my_min_heap.heap: print(i) print('''Min Heap - After decrease key of node [B -> -17]''') my_min_heap.decrease_key(b, -1_7) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
75
1
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a__ ( lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : str = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = emb.weight.shape UpperCAmelCase__ : Dict = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = emb.weight.data return lin_layer def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> str: UpperCAmelCase__ : Union[str, Any] = {} for old_key in state_dict.keys(): UpperCAmelCase__ : Optional[int] = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase__ : int = key.replace('''moe_layer.experts.0''' , F"""ffn.experts.expert_{expert_idx}""" ) else: UpperCAmelCase__ : int = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: UpperCAmelCase__ : Tuple = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: UpperCAmelCase__ : List[Any] = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: UpperCAmelCase__ : Tuple = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." 
in key: UpperCAmelCase__ : Union[str, Any] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: UpperCAmelCase__ : List[Any] = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: UpperCAmelCase__ : int = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) UpperCAmelCase__ : Tuple = state_dict[old_key] return new_dict def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = WEIGHTS_NAME ) -> int: UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Tuple = 0 os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) for expert in range(lowerCAmelCase__ ): UpperCAmelCase__ : Tuple = switch_checkpoint_path + F"""-rank-{expert}.pt""" if os.path.isfile(lowerCAmelCase__ ): UpperCAmelCase__ : int = torch.load(lowerCAmelCase__ )['''model'''] remove_ignore_keys_(lowerCAmelCase__ ) UpperCAmelCase__ : str = rename_fairseq_keys(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Optional[Any] = os.path.join( lowerCAmelCase__ , weights_name.replace('''.bin''' , F"""-{len(lowerCAmelCase__ )+1:05d}-of-???.bin""" ) ) torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(lowerCAmelCase__ )[0]].dtype ) # Add the last block UpperCAmelCase__ : Union[str, Any] = os.path.join(lowerCAmelCase__ , weights_name.replace('''.bin''' , F"""-{len(lowerCAmelCase__ )+1:05d}-of-???.bin""" ) ) UpperCAmelCase__ : Optional[int] = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = rename_fairseq_keys(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : str = shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(lowerCAmelCase__ ) == 1: UpperCAmelCase__ : Union[str, Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) # Otherwise, let's build the index UpperCAmelCase__ : int = {} for idx, shard in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ : List[str] = weights_name.replace('''.bin''' , F"""-{idx+1:05d}-of-{len(lowerCAmelCase__ ):05d}.bin""" ) UpperCAmelCase__ : Tuple = os.path.join(lowerCAmelCase__ , weights_name.replace('''.bin''' , F"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) for key in shard: UpperCAmelCase__ : Optional[int] = shard_file # Add the metadata UpperCAmelCase__ : str = {'''total_size''': total_size} UpperCAmelCase__ : Optional[int] = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase__ : Optional[int] = json.dumps(lowerCAmelCase__ , indent=2 , sort_keys=lowerCAmelCase__ ) + '''\n''' f.write(lowerCAmelCase__ ) return metadata, index if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. 
Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) UpperCamelCase__ = parser.parse_args() UpperCamelCase__ , UpperCamelCase__ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) UpperCamelCase__ = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) UpperCamelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
75
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCamelCase__ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS) UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') UpperCamelCase__ = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def a__ ( lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : str = None # source code of `config_class` UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('''/''' ): UpperCAmelCase__ : List[str] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: UpperCAmelCase__ : Any = ckpt_name break return checkpoint def a__ ( ) -> Dict: UpperCAmelCase__ : Optional[Any] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
75
1
'''simple docstring''' from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCamelCase__ = logging.get_logger(__name__) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]: return [ int(10_00 * (box[0] / width) ), int(10_00 * (box[1] / height) ), int(10_00 * (box[2] / width) ), int(10_00 * (box[3] / height) ), ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : Optional[int] = to_pil_image(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ : int = pil_image.size UpperCAmelCase__ : Any = pytesseract.image_to_data(lowerCAmelCase__ , lang=lowerCAmelCase__ , output_type='''dict''' , config=lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates UpperCAmelCase__ : Union[str, Any] = [idx for idx, word in enumerate(lowerCAmelCase__ ) if not word.strip()] UpperCAmelCase__ : str = [word for idx, word in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices] UpperCAmelCase__ : Tuple = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices] UpperCAmelCase__ : Tuple = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices] 
UpperCAmelCase__ : Optional[Any] = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices] UpperCAmelCase__ : str = [coord for idx, coord in enumerate(lowerCAmelCase__ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCAmelCase__ : Optional[Any] = [] for x, y, w, h in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : str = [x, y, x + w, y + h] actual_boxes.append(lowerCAmelCase__ ) # finally, normalize the bounding boxes UpperCAmelCase__ : List[Any] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ) assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['pixel_values'] def __init__( self : Tuple , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : float = 1 / 255 , _A : bool = True , _A : Union[float, Iterable[float]] = None , _A : Union[float, Iterable[float]] = None , _A : bool = True , _A : Optional[str] = None , _A : Optional[str] = "" , **_A : Tuple , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : Optional[int] = size if size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase__ : Union[str, Any] = get_size_dict(_A ) UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Tuple = size UpperCAmelCase__ : int = resample UpperCAmelCase__ : Any = do_rescale UpperCAmelCase__ : Tuple = rescale_value UpperCAmelCase__ : Optional[Any] = do_normalize UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD UpperCAmelCase__ : Optional[int] = apply_ocr UpperCAmelCase__ : List[Any] = 
ocr_lang UpperCAmelCase__ : Optional[int] = tesseract_config def lowercase_ ( self : List[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ): '''simple docstring''' UpperCAmelCase__ : List[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) UpperCAmelCase__ : Dict = (size['''height'''], size['''width''']) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ): '''simple docstring''' return rescale(_A , scale=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[int] , _A : np.ndarray , _A : Union[float, Iterable[float]] , _A : Union[float, Iterable[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : str=None , _A : bool = None , _A : float = None , _A : bool = None , _A : Union[float, Iterable[float]] = None , _A : Union[float, Iterable[float]] = None , _A : bool = None , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Tuple , ): '''simple docstring''' UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : List[str] = size if size is not None else self.size UpperCAmelCase__ : Any = get_size_dict(_A ) UpperCAmelCase__ : Optional[Any] = resample if resample is not None else self.resample UpperCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else 
self.do_rescale UpperCAmelCase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : Any = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : int = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : Dict = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCAmelCase__ : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCAmelCase__ : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCAmelCase__ : Dict = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase__ : Tuple = [to_numpy_array(_A ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) UpperCAmelCase__ : Tuple = [] UpperCAmelCase__ : Optional[Any] = [] for image in images: UpperCAmelCase__ , UpperCAmelCase__ : Tuple = apply_tesseract(_A , _A , _A ) words_batch.append(_A ) boxes_batch.append(_A ) if do_resize: UpperCAmelCase__ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_rescale: UpperCAmelCase__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: UpperCAmelCase__ : Tuple = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] UpperCAmelCase__ : Dict = BatchFeature(data={'''pixel_values''': images} , tensor_type=_A ) if apply_ocr: UpperCAmelCase__ : Optional[int] = words_batch UpperCAmelCase__ : Any = boxes_batch return data
75
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'torchsde'] def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''torchsde'''] )
75
1
'''simple docstring''' import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = LayoutLMTokenizer lowerCAmelCase__ = LayoutLMTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def lowercase_ ( self : Optional[int] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowercase_ ( self : List[str] , **_A : List[str] ): '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : List[str] , _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = '''UNwant\u00E9d,running''' UpperCAmelCase__ : Any = '''unwanted, running''' return input_text, output_text def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file ) UpperCAmelCase__ : Optional[int] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [7, 4, 5, 10, 8, 9] ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass
75
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'ctrl' lowerCAmelCase__ = ['past_key_values'] lowerCAmelCase__ = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Any = n_positions UpperCAmelCase__ : Optional[Any] = n_embd UpperCAmelCase__ : List[str] = n_layer UpperCAmelCase__ : Any = n_head UpperCAmelCase__ : int = dff UpperCAmelCase__ : str = resid_pdrop UpperCAmelCase__ : Tuple = embd_pdrop UpperCAmelCase__ : int = layer_norm_epsilon UpperCAmelCase__ : Tuple = initializer_range UpperCAmelCase__ : Union[str, Any] = use_cache super().__init__(**_A )
75
1
'''simple docstring''' from torch import nn def a__ ( lowerCAmelCase__ ) -> List[Any]: if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
75
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[str] ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def lowercase_ ( self : List[Any] , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script 
, source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , ) def lowercase_ ( self : Optional[int] , _A : Any ): '''simple docstring''' TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def lowercase_ ( self : Optional[int] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A ) # run training estimator.fit() # result dataframe UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase__ : Any = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
75
1
"""Fast and slow tests for the Stable Diffusion 2 inpainting pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    # NOTE(review): the original base list was the undefined placeholder `__a`
    # three times; these are the three mixins imported above — confirm order.
    PipelineLatentTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineTesterMixin,
    unittest.TestCase,
):
    """Fast, CPU-only smoke tests using tiny randomly-initialised components."""

    # The original gave every class attribute the same name so only the last
    # assignment survived; these are the names the tester mixins actually read.
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a minimal unet/scheduler/vae/text-encoder set, small enough for CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,  # 4 latent + 4 masked-image latent + 1 mask channel
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # NOTE(review): value was obfuscated — confirm
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/mask inputs for ``device``."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        # Looser tolerance than the mixin default.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the released stabilityai/stable-diffusion-2-inpainting weights."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 is far noisier than fp32, hence the loose tolerance.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
75
"""Accuracy metric for the MATH dataset with LaTeX canonicalization."""
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase_(datasets.Metric):
    """Metric wrapper around ``math_equivalence.is_equiv``."""

    def _info(self):
        # `_info` / `_compute` are the hook names `datasets.Metric` dispatches
        # to; the original's duplicate `lowercase_` names made the metric unusable.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return the fraction of predictions judged equivalent to their references."""
        n_correct = 0.0
        # Compare each aligned (prediction, reference) pair — the original
        # passed the whole lists to is_equiv instead of the loop variables.
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
75
1
"""Download an Instagram video/IGTV post to disk via downloadgram.net."""
from datetime import datetime

import requests


def a__(lowerCAmelCase__) -> bytes:
    """Return the raw MP4 bytes for the Instagram post URL ``lowerCAmelCase__``.

    The downloadgram.net resolver turns a post URL into a direct video
    source URL, which is then fetched.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # Resolve the *parameter* (the original read an unrelated global `url`);
    # a timeout keeps a dead endpoint from hanging the script forever.
    resolver_response = requests.get(base_url + lowerCAmelCase__, timeout=30)
    resolver_response.raise_for_status()
    video_url = resolver_response.json()[0]["urls"][0]["src"]
    # Fetch the resolved source URL — the original mistakenly re-fetched the
    # input post URL, returning HTML instead of the video.
    video_response = requests.get(video_url, timeout=30)
    video_response.raise_for_status()
    return video_response.content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(a__(url))
    print(f"Done. Video saved to disk as {file_name}.")
75
"""Fast (tokenizers-backed) tokenizer for BART."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

# NOTE(review): the original bound every module constant to one shared name,
# leaving the class-body references below undefined; restored distinct names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    """Fast BART tokenizer (byte-level BPE), backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Sync the backend pre-tokenizer's add_prefix_space with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with a logged error) if it was never set.

        NOTE(review): the original named this property `lowercase_`, which made
        the `@mask_token.setter` decorator below a NameError.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words, including the preceding space
        # (lstrip=True, rstrip=False — original flag values were obfuscated; confirm).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids — return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
75
1
"""`accelerate test` CLI command: runs a sanity-check distributed script."""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


# NOTE(review): the original defined all three functions under the same name
# `a__`, so the first two were shadowed and `main` crashed on undefined names.
def test_command_parser(subparsers=None):
    """Build (or attach, when `subparsers` is given) the `test` argument parser."""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Launch the bundled test script through `accelerate-launch`."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
75
"""Random transposition shuffle demo."""
import random
from typing import Any


def a__(lowerCAmelCase__) -> list[Any]:
    """Shuffle the list in place with len(list) random transpositions and return it.

    Note: both indices are drawn over the full range (the "naive" variant),
    not the classic decreasing-range Fisher-Yates.
    """
    for _ in range(len(lowerCAmelCase__)):
        a = random.randint(0, len(lowerCAmelCase__) - 1)
        b = random.randint(0, len(lowerCAmelCase__) - 1)
        # Swap in place — the original assigned the swapped pair to two
        # throwaway names and referenced an undefined `data`, so nothing
        # was ever shuffled.
        lowerCAmelCase__[a], lowerCAmelCase__[b] = lowerCAmelCase__[b], lowerCAmelCase__[a]
    return lowerCAmelCase__


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", a__(integers), a__(strings))
75
1
"""Check that every model config class docstring links a valid checkpoint."""
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Composite/abstract configs whose docstrings legitimately lack a checkpoint.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring markdown link matches
    the canonical https://huggingface.co/<name> URL, or None if none does."""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-exempt config without a valid checkpoint link."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
75
"""Segmented Sieve of Eratosthenes."""
import math


def a__(lowerCAmelCase__) -> list[int]:
    """Return all primes <= ``lowerCAmelCase__`` using a segmented sieve.

    Base primes up to sqrt(n) are found with a plain sieve, then each
    segment of width sqrt(n) is sieved by those base primes.
    """
    prime = []
    start = 2
    end = int(math.sqrt(lowerCAmelCase__))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Plain sieve over [2, sqrt(n)] to collect the base primes.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            # step by `start` — the original stepped by n, leaving
            # composites unmarked and corrupting the base-prime list
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, lowerCAmelCase__)
    # Sieve each segment [low, high] by the base primes.
    while low <= lowerCAmelCase__:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # first multiple of `each` inside the segment
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, lowerCAmelCase__)
    return prime


if __name__ == "__main__":
    # The original called the undefined name `sieve`; guard the demo so
    # importing this module no longer sieves a million numbers (or crashes).
    print(a__(10**6))
75
1
"""Fast and slow tests for the ScoreSdeVe pipeline."""
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """CPU-friendly test with a tiny randomly-initialised UNet."""

    @property
    def dummy_uncond_unet(self):
        # NOTE(review): the original named this property `lowercase_`, which was
        # then shadowed by the test method of the same name — the property and
        # the `self.dummy_uncond_unet` reference were both broken.
        torch.manual_seed(0)  # deterministic weights
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
75
"""Fast and slow tests for the Stable Diffusion 2 inpainting pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    # NOTE(review): the original base list was the undefined placeholder `__a`
    # three times; these are the three mixins imported above — confirm order.
    PipelineLatentTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineTesterMixin,
    unittest.TestCase,
):
    """Fast, CPU-only smoke tests using tiny randomly-initialised components."""

    # The original gave every class attribute the same name so only the last
    # assignment survived; these are the names the tester mixins actually read.
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a minimal unet/scheduler/vae/text-encoder set, small enough for CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,  # 4 latent + 4 masked-image latent + 1 mask channel
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # NOTE(review): value was obfuscated — confirm
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/mask inputs for ``device``."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        # Looser tolerance than the mixin default.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the released stabilityai/stable-diffusion-2-inpainting weights."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 is far noisier than fp32, hence the loose tolerance.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
75
1
'''simple docstring''' def a__ ( lowerCAmelCase__ = 10_00 ) -> int: return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
75
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase__ = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple: if attention_mask is None: UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , 
_A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : str = is_training UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : Union[str, Any] = max_position_embeddings UpperCAmelCase__ : int = eos_token_id UpperCAmelCase__ : Optional[int] = pad_token_id UpperCAmelCase__ : List[str] = bos_token_id UpperCAmelCase__ : Union[str, Any] = initializer_range def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , ) UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A ) return config, inputs_dict def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = 20 UpperCAmelCase__ : int = model_class_name(_A ) UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : str = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Tuple = model.decode( decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , ) UpperCAmelCase__ : int = model.decode(_A , _A ) UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ): '''simple docstring''' 
UpperCAmelCase__ : Tuple = 20 UpperCAmelCase__ : Optional[int] = model_class_name(_A ) UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase__ : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : int = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase__ : Any = model.decode( decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , ) UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A ) UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class lowerCamelCase_ ( unittest.TestCase ): lowerCAmelCase__ = 9_9 def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 
2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) UpperCAmelCase__ : int = input_ids.shape[0] UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data() UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A ) UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A ) UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A ) UpperCAmelCase__ : int = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 ) UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum() UpperCAmelCase__ : Dict = np.equal(_A , 1 
).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_A , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase_ ( __a , unittest.TestCase , __a ): lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_A , _A , _A ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A ) UpperCAmelCase__ : str = model_class(_A ) @jax.jit def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ): return model.encode(input_ids=_A , attention_mask=_A ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def 
lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : List[str] = model_class(_A ) UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) UpperCAmelCase__ : Tuple = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ): return model.decode( decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self : List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id UpperCAmelCase__ : Union[str, Any] = model(_A ) self.assertIsNotNone(_A ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} UpperCAmelCase__ : str = 
FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A ) UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) UpperCAmelCase__ : Optional[Any] = ['''Sam'''] UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' ) UpperCAmelCase__ : List[str] = model.generate(**_A , **_A ) UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.''' UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A ) assert generated_txt[0].strip() == tgt_text
75
1
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase__ = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
75
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCamelCase_ ( datasets.BeamBasedBuilder ): def lowercase_ ( self : str ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , ) def lowercase_ ( self : int , _A : Optional[int] , _A : Optional[Any] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )] def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_A ) class lowerCamelCase_ ( datasets.BeamBasedBuilder ): def lowercase_ ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , ) def lowercase_ ( self : Any , _A : List[str] , _A : Any ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} ) ] def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Tuple ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_A ) def a__ ( ) -> Tuple: return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] def a__ ( ) -> Optional[Any]: return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] class lowerCamelCase_ ( __a ): @require_beam def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Any = 
DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) UpperCAmelCase__ : Union[str, Any] = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A ) self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def lowercase_ ( self : Any ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : List[str] = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : int = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[int] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock: UpperCAmelCase__ : Dict = partial(_A , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertTrue( os.path.exists( os.path.join( _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples 
, _A ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def lowercase_ ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Tuple = DummyBeamDataset(cache_dir=_A ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : int = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) ) UpperCAmelCase__ : Optional[int] = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _A ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A ) self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset
75
1
'''simple docstring''' from __future__ import annotations def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[int, int]: if b == 0: return (1, 0) ((UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = extended_euclid(lowerCAmelCase__ , a % b ) UpperCAmelCase__ : List[str] = a // b return (y, x - k * y) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: ((UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = na * na UpperCAmelCase__ : Any = ra * x * na + ra * y * na return (n % m + m) % m def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> int: ((UpperCAmelCase__) , (UpperCAmelCase__)) : Tuple = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ ) if b < 0: UpperCAmelCase__ : int = (b % n + n) % n return b def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ ), invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Any = na * na UpperCAmelCase__ : Optional[int] = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='''chinese_remainder_theorem''', verbose=True) testmod(name='''chinese_remainder_theorem2''', verbose=True) testmod(name='''invert_modulo''', verbose=True) testmod(name='''extended_euclid''', verbose=True)
75
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def a__ ( ) -> List[str]: UpperCAmelCase__ : Optional[int] = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: UpperCAmelCase__ : Any = get_sagemaker_input() else: UpperCAmelCase__ : List[str] = get_cluster_input() return config def a__ ( lowerCAmelCase__=None ) -> List[Any]: if subparsers is not None: UpperCAmelCase__ : Union[str, Any] = subparsers.add_parser('''config''' , description=lowerCAmelCase__ ) else: UpperCAmelCase__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCAmelCase__ ) parser.add_argument( '''--config_file''' , default=lowerCAmelCase__ , help=( '''The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase__ ) return parser def a__ ( lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : List[Any] = get_user_input() if args.config_file is not None: UpperCAmelCase__ : Any = args.config_file else: if not os.path.isdir(lowerCAmelCase__ ): os.makedirs(lowerCAmelCase__ ) UpperCAmelCase__ : int = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(lowerCAmelCase__ ) else: config.to_yaml_file(lowerCAmelCase__ ) print(F"""accelerate configuration saved at {config_file}""" ) def a__ ( ) -> str: UpperCAmelCase__ : Optional[int] = config_command_parser() UpperCAmelCase__ : Any = parser.parse_args() config_command(lowerCAmelCase__ ) if __name__ == "__main__": main()
75
1
'''simple docstring''' import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: # Initialise PyTorch model UpperCAmelCase__ : Dict = LxmertConfig.from_json_file(lowerCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) UpperCAmelCase__ : str = LxmertForPreTraining(lowerCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
75
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: # Construct model if gpta_config_file == "": UpperCAmelCase__ : Optional[Any] = GPTaConfig() else: UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ ) # Load weights from numpy load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowerCAmelCase__ ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) UpperCamelCase__ = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
75
1