Dataset schema (five columns per row, repeated in order below):
  code                     string   (lengths 86 - 54.5k)
  code_codestyle           int64    (0 - 371)
  style_context            string   (lengths 87 - 49.2k)
  style_context_codestyle  int64    (0 - 349)
  label                    int64    (0 - 1)
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> int: '''simple docstring''' if len(_UpperCamelCase ) != len(_UpperCamelCase ): raise ValueError("""String lengths must match!""" ) __UpperCAmelCase : Dict = 0 for chara, chara in zip(_UpperCamelCase , _UpperCamelCase ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
style_context_codestyle: 320
label: 1
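A quick usage sketch for the row above; it assumes the cleaned-up names hamming_distance, binary_multiply and binary_mod_multiply, and the values are worked by hand:

assert hamming_distance("karolin", "kathrin") == 3  # strings differ at 3 positions
assert binary_multiply(7, 9) == 63                  # 9 = 0b1001 -> 7 + 56
assert binary_mod_multiply(7, 9, 10) == 3           # 63 % 10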
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' def wrapper(*_UpperCamelCase : Optional[int] , **_UpperCamelCase : Any ): __UpperCAmelCase : Optional[Any] = timeit.default_timer() __UpperCAmelCase : List[str] = func(*_UpperCamelCase , **_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = timeit.default_timer() - starttime return delta __UpperCAmelCase : Optional[Any] = func.__name__ return wrapper def lowerCamelCase ( _UpperCamelCase : dict , _UpperCamelCase : str=1_0_0 , _UpperCamelCase : Optional[int]=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : str = [] __UpperCAmelCase : List[str] = seq_shapes or {} for i in range(_UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(_UpperCamelCase , _ArrayXD ): __UpperCAmelCase : Any = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(_UpperCamelCase , datasets.Value ): if v.dtype == "string": __UpperCAmelCase : Tuple = """The small grey turtle was surprisingly fast when challenged.""" else: __UpperCAmelCase : str = np.random.randint(1_0 , size=1 ).astype(v.dtype ).item() elif isinstance(_UpperCamelCase , datasets.Sequence ): while isinstance(_UpperCamelCase , datasets.Sequence ): __UpperCAmelCase : str = v.feature __UpperCAmelCase : Union[str, Any] = seq_shapes[k] __UpperCAmelCase : str = np.random.rand(*_UpperCamelCase ).astype(v.dtype ) __UpperCAmelCase : List[str] = data dummy_data.append((i, example) ) return dummy_data def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=1_0_0 , _UpperCamelCase : str=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[Any] = generate_examples(_UpperCamelCase , num_examples=_UpperCamelCase , seq_shapes=_UpperCamelCase ) with ArrowWriter(features=_UpperCamelCase , path=_UpperCamelCase ) as writer: for key, record in dummy_data: __UpperCAmelCase : int = features.encode_example(_UpperCamelCase ) writer.write(_UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' ) __UpperCAmelCase : Tuple = datasets.Dataset.from_file(filename=_UpperCamelCase , info=datasets.DatasetInfo(features=_UpperCamelCase ) ) return dataset
code_codestyle: 320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
style_context_codestyle: 320
label: 1
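Sketch of how a ProcessorMixin subclass like the one above is typically called; the processor and image variables are hypothetical:

# A combined text+image call merges tokenizer and image-processor outputs,
# so the returned encoding carries all three model input names.
encoding = processor(text="a photo of a cat", images=image, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']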
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
code_codestyle: 320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
style_context_codestyle: 320
label: 1
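Hand-checked examples for the median helper above (assuming the name median_of_two_arrays):

assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0       # odd total length
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # mean of the middle two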
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
code_codestyle: 320
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
style_context_codestyle: 320
label: 1
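A minimal sketch of the BetterTransformer round trip the test above exercises; `inputs` is assumed to come from a tokenizer call:

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in fused attention modules
generated = model.generate(**inputs)        # generation works while converted
model = model.reverse_bettertransformer()   # required before save_pretrained
model.save_pretrained("./tiny-t5-checkpoint")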
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : int ) -> int: '''simple docstring''' if len(_UpperCamelCase ) != len(_UpperCamelCase ): raise ValueError("""The length of profit and weight must be same.""" ) if max_weight <= 0: raise ValueError("""max_weight must greater than zero.""" ) if any(p < 0 for p in profit ): raise ValueError("""Profit can not be negative.""" ) if any(w < 0 for w in weight ): raise ValueError("""Weight can not be negative.""" ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. __UpperCAmelCase : List[str] = [p / w for p, w in zip(_UpperCamelCase , _UpperCamelCase )] # Creating a copy of the list and sorting profit/weight in ascending order __UpperCAmelCase : List[Any] = sorted(_UpperCamelCase ) # declaring useful variables __UpperCAmelCase : int = len(_UpperCamelCase ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : Dict = 0 # loop till the total weight do not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight __UpperCAmelCase : Optional[int] = sorted_profit_by_weight[length - i - 1] __UpperCAmelCase : Optional[Any] = profit_by_weight.index(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( 'Input profits, weights, and then max_weight (all positive ints) separated by ' 'spaces.' ) UpperCAmelCase : List[Any] = [int(x) for x in input('Input profits separated by spaces: ').split()] UpperCAmelCase : List[Any] = [int(x) for x in input('Input weights separated by spaces: ').split()] UpperCAmelCase : Any = int(input('Max weight allowed: ')) # Function Call calc_profit(profit, weight, max_weight)
code_codestyle: 320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 320
label: 1
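Hand-worked checks for the greedy fractional knapsack above (assuming the name calc_profit); the profit/weight ratios are 0.2, 0.5, 1, 2, 5 and items are taken best ratio first:

assert calc_profit([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], 15) == 15   # everything fits exactly
assert calc_profit([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], 5) == 11.0  # 5 + 4 + (2/3) * 3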
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple[str, float]: '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif stress < 0: raise ValueError("""Stress cannot be negative""" ) elif tangential_force < 0: raise ValueError("""Tangential Force cannot be negative""" ) elif area < 0: raise ValueError("""Area cannot be negative""" ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 320
label: 1
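Worked examples for the shear-stress helper in the row above (assuming the name shear_stress); it solves for whichever of the three arguments is passed as zero:

assert shear_stress(25, 100, 0) == ("area", 4.0)                     # area = F / sigma
assert shear_stress(0, 1600, 200) == ("stress", 8.0)                 # sigma = F / A
assert shear_stress(1000, 0, 1200) == ("tangential_force", 1200000)  # F = sigma * A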
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def lowerCamelCase ( ) -> str: '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Tuple = 9, 1_4 # noqa: F841 __UpperCAmelCase : Any = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 1_4], [3, 4, 9], [5, 4, 1_0], [1, 7, 1_1], ] __UpperCAmelCase : List[str] = defaultdict(_UpperCamelCase ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) __UpperCAmelCase : Dict = mst(_UpperCamelCase ) __UpperCAmelCase : List[str] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: __UpperCAmelCase : Tuple = tuple(answer[:2] ) __UpperCAmelCase : Any = tuple(edge[::-1] ) assert edge in result or reverse in result
code_codestyle: 320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
style_context_codestyle: 320
label: 1
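With the corrected loop bound, the cell above reproduces Project Euler problem 40; appending one million integers yields far more than 10**6 digits, so every index exists:

print(solution())  # 210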
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def lowerCamelCase ( _UpperCamelCase : List[str] ) -> str: '''simple docstring''' __UpperCAmelCase : List[str] = [] for line in lines: __UpperCAmelCase : Any = re.sub(R"""#.*""" , """""" , _UpperCamelCase ) # remove comments if line: filtered_lines.append(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = """\n""".join(_UpperCamelCase ) # Make a hash from all this code __UpperCAmelCase : Dict = full_str.encode("""utf-8""" ) return shaaaa(_UpperCamelCase ).hexdigest() # get importable module names and hash for caching UpperCAmelCase : Union[str, Any] = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase : Optional[int] = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase : List[str] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCAmelCase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
code_codestyle: 320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 320
label: 1
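A sketch of the caching property _hash_python_lines provides: '#' comments are stripped before hashing, so sources that differ only in comments share a hash:

hash_a = _hash_python_lines(["x = 1  # first version", "print(x)"])
hash_b = _hash_python_lines(["x = 1  # second version", "print(x)"])
assert hash_a == hash_b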
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets UpperCAmelCase : str = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' UpperCAmelCase : List[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n' UpperCAmelCase : Union[str, Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : Any ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ): raise ImportWarning( """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n""" """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[ """https://github.com/m-popovic/chrF""", ] , ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : int = CHRF.CHAR_ORDER , UpperCamelCase : int = CHRF.WORD_ORDER , UpperCamelCase : int = CHRF.BETA , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , ): '''simple docstring''' __UpperCAmelCase : Tuple = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) __UpperCAmelCase : int = [[refs[i] for refs in references] for i in range(UpperCamelCase )] __UpperCAmelCase : str = CHRF(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = sb_chrf.corpus_score(UpperCamelCase , UpperCamelCase ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
code_codestyle: 320
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
style_context_codestyle: 320
label: 1
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: UpperCAmelCase : Tuple = None try: import msvcrt except ImportError: UpperCAmelCase : List[Any] = None try: import fcntl except ImportError: UpperCAmelCase : str = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: UpperCAmelCase : Optional[int] = OSError # Data # ------------------------------------------------ UpperCAmelCase : int = [ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] UpperCAmelCase : Union[str, Any] = '3.0.12' UpperCAmelCase : Optional[int] = None def lowerCamelCase ( ) -> Tuple: '''simple docstring''' global _logger __UpperCAmelCase : int = _logger or logging.getLogger(__name__ ) return _logger class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = lock_file return None def __str__( self : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = f'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class lowerCamelCase__ : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Any = lock return None def __enter__( self : Any ): '''simple docstring''' return self.lock def __exit__( self : Dict , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Tuple ): '''simple docstring''' self.lock.release() return None class lowerCamelCase__ : """simple docstring""" def __init__( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[Any]=-1 , UpperCamelCase : Tuple=None ): '''simple docstring''' __UpperCAmelCase : Dict = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __UpperCAmelCase : Optional[Any] = self.hash_filename_if_too_long(UpperCamelCase , UpperCamelCase ) # The path to the lock file. __UpperCAmelCase : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __UpperCAmelCase : int = None # The default timeout value. __UpperCAmelCase : Tuple = timeout # We use this lock primarily for the lock counter. __UpperCAmelCase : List[Any] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. 
__UpperCAmelCase : Any = 0 return None @property def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return self._lock_file @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = float(UpperCamelCase ) return None def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase__ ( self : Dict , UpperCamelCase : Dict=None , UpperCamelCase : int=0.05 ): '''simple docstring''' if timeout is None: __UpperCAmelCase : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 __UpperCAmelCase : List[str] = id(self ) __UpperCAmelCase : Dict = self._lock_file __UpperCAmelCase : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(UpperCamelCase ) except: # noqa # Something did go wrong, so decrement the counter. 
with self._thread_lock: __UpperCAmelCase : Any = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[Any]=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __UpperCAmelCase : Optional[int] = id(self ) __UpperCAmelCase : List[Any] = self._lock_file logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __UpperCAmelCase : Optional[Any] = 0 logger().debug(f'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : List[str] ): '''simple docstring''' self.acquire() return self def __exit__( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Tuple ): '''simple docstring''' self.release() return None def __del__( self : Tuple ): '''simple docstring''' self.release(force=UpperCamelCase ) return None def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : int = os.path.basename(UpperCamelCase ) if len(UpperCamelCase ) > max_length and max_length > 0: __UpperCAmelCase : Dict = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = str(hash(UpperCamelCase ) ) __UpperCAmelCase : Optional[Any] = filename[: max_length - len(UpperCamelCase ) - 8] + """...""" + hashed_filename + """.lock""" return os.path.join(UpperCamelCase , UpperCamelCase ) else: return path class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : str , UpperCamelCase : Tuple , UpperCamelCase : List[str]=-1 , UpperCamelCase : Union[str, Any]=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(UpperCamelCase , timeout=UpperCamelCase , max_filename_length=UpperCamelCase ) __UpperCAmelCase : str = """\\\\?\\""" + relative_to_absolute_path(self.lock_file ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Dict = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __UpperCAmelCase : int = os.open(self._lock_file , UpperCamelCase ) except OSError: pass else: try: msvcrt.locking(UpperCamelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(UpperCamelCase ) else: __UpperCAmelCase : int = fd return None def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self._lock_file_fd __UpperCAmelCase : List[str] = None msvcrt.locking(UpperCamelCase , msvcrt.LK_UNLCK , 1 ) os.close(UpperCamelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : str=-1 , UpperCamelCase : Dict=None ): '''simple docstring''' __UpperCAmelCase : Any = os.statvfs(os.path.dirname(UpperCamelCase ) ).f_namemax super().__init__(UpperCamelCase , timeout=UpperCamelCase , max_filename_length=UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = os.O_RDWR | os.O_CREAT | os.O_TRUNC __UpperCAmelCase : Tuple = os.open(self._lock_file , UpperCamelCase ) try: fcntl.flock(UpperCamelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(UpperCamelCase ) else: __UpperCAmelCase : int = fd return None def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Dict = self._lock_file_fd __UpperCAmelCase : Union[str, Any] = None fcntl.flock(UpperCamelCase , fcntl.LOCK_UN ) os.close(UpperCamelCase ) return None class lowerCamelCase__ ( A ): """simple docstring""" def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __UpperCAmelCase : List[Any] = os.open(self._lock_file , UpperCamelCase ) except OSError: pass else: __UpperCAmelCase : str = fd return None def lowerCamelCase__ ( self : Dict ): '''simple docstring''' os.close(self._lock_file_fd ) __UpperCAmelCase : str = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None UpperCAmelCase : Optional[Any] = None if msvcrt: UpperCAmelCase : Optional[int] = WindowsFileLock elif fcntl: UpperCAmelCase : Optional[Any] = UnixFileLock else: UpperCAmelCase : int = SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
code_codestyle: 320
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
style_context_codestyle: 320
label: 1
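Typical use of the lock classes in the row above; the platform alias FileLock acquires on entry and raises the module's Timeout if `timeout` seconds pass:

lock = FileLock("shared_resource.txt.lock", timeout=5)
with lock:
    pass  # exclusive access to the guarded resource here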
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCAmelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCAmelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : tuple[int, ...] ) -> str | None: '''simple docstring''' __UpperCAmelCase : str = "" __UpperCAmelCase : int __UpperCAmelCase : int __UpperCAmelCase : int for keychar, cipherchar in zip(cycle(_UpperCamelCase ) , _UpperCamelCase ): __UpperCAmelCase : Optional[Any] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(_UpperCamelCase ) return decoded def lowerCamelCase ( _UpperCamelCase : list[int] ) -> list[str]: '''simple docstring''' __UpperCAmelCase : list[str] = [] for key in product(_UpperCamelCase , repeat=3 ): __UpperCAmelCase : Optional[int] = try_key(_UpperCamelCase , _UpperCamelCase ) if encoded is not None: possibles.append(_UpperCamelCase ) return possibles def lowerCamelCase ( _UpperCamelCase : list[str] , _UpperCamelCase : str ) -> list[str]: '''simple docstring''' return [possible for possible in possibles if common_word in possible.lower()] def lowerCamelCase ( _UpperCamelCase : str = "p059_cipher.txt" ) -> int: '''simple docstring''' __UpperCAmelCase : list[int] __UpperCAmelCase : list[str] __UpperCAmelCase : str __UpperCAmelCase : str __UpperCAmelCase : str = Path(_UpperCamelCase ).parent.joinpath(_UpperCamelCase ).read_text(encoding="""utf-8""" ) __UpperCAmelCase : Optional[Any] = [int(_UpperCamelCase ) for number in data.strip().split(""",""" )] __UpperCAmelCase : List[Any] = filter_valid_chars(_UpperCamelCase ) for common_word in COMMON_WORDS: __UpperCAmelCase : Optional[int] = filter_common_word(_UpperCamelCase , _UpperCamelCase ) if len(_UpperCamelCase ) == 1: break __UpperCAmelCase : int = possibles[0] return sum(ord(_UpperCamelCase ) for char in decoded_text ) if __name__ == "__main__": print(F"{solution() = }")
code_codestyle: 320
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
style_context_codestyle: 320
label: 1
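An XOR round trip matching try_key's scheme in the cipher-cracking cell above (names assumed from that cell); XOR with the same cycling key inverts itself:

from itertools import cycle

key = (ord("g"), ord("o"), ord("d"))
plain = "the quick brown fox"
cipher = [p ^ k for p, k in zip(map(ord, plain), cycle(key))]
assert try_key(cipher, key) == plain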
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'microsoft/unispeech-large-1500h-cv': ( 'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class lowerCamelCase__ ( A ): """simple docstring""" __a = """unispeech""" def __init__( self : str , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=768 , UpperCamelCase : Any=12 , UpperCamelCase : int=12 , UpperCamelCase : List[str]=3_072 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : str=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : int=1e-5 , UpperCamelCase : List[str]="group" , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Any=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase : str=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase : Tuple=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase : str=False , UpperCamelCase : List[str]=128 , UpperCamelCase : Union[str, Any]=16 , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Optional[Any]=True , UpperCamelCase : int=0.05 , UpperCamelCase : Any=10 , UpperCamelCase : Any=2 , UpperCamelCase : Any=0.0 , UpperCamelCase : int=10 , UpperCamelCase : Any=0 , UpperCamelCase : str=320 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : int=0.1 , UpperCamelCase : Union[str, Any]=100 , UpperCamelCase : List[str]=256 , UpperCamelCase : Union[str, Any]=256 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Optional[int]="mean" , UpperCamelCase : Dict=False , UpperCamelCase : Any=False , UpperCamelCase : Tuple=256 , UpperCamelCase : Optional[Any]=80 , UpperCamelCase : int=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : int=0.5 , **UpperCamelCase : Any , ): '''simple docstring''' super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Tuple = feat_extract_norm __UpperCAmelCase : str = feat_extract_activation __UpperCAmelCase : Dict = list(UpperCamelCase ) __UpperCAmelCase : Dict = list(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = list(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = conv_bias __UpperCAmelCase : int = num_conv_pos_embeddings __UpperCAmelCase : str = num_conv_pos_embedding_groups __UpperCAmelCase : List[str] = len(self.conv_dim ) __UpperCAmelCase : List[str] = num_hidden_layers __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : Tuple = hidden_act __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : Tuple = attention_dropout __UpperCAmelCase : Optional[int] = activation_dropout __UpperCAmelCase : Union[str, Any] = feat_proj_dropout __UpperCAmelCase : List[str] = final_dropout __UpperCAmelCase : List[Any] = layerdrop __UpperCAmelCase : Union[str, Any] = layer_norm_eps __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[Any] = num_ctc_classes __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : int = do_stable_layer_norm __UpperCAmelCase : Tuple = use_weighted_layer_sum __UpperCAmelCase : Optional[Any] = 
classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __UpperCAmelCase : Tuple = apply_spec_augment __UpperCAmelCase : Dict = mask_time_prob __UpperCAmelCase : str = mask_time_length __UpperCAmelCase : Tuple = mask_time_min_masks __UpperCAmelCase : str = mask_feature_prob __UpperCAmelCase : Optional[int] = mask_feature_length __UpperCAmelCase : Union[str, Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __UpperCAmelCase : List[str] = num_codevectors_per_group __UpperCAmelCase : Any = num_codevector_groups __UpperCAmelCase : Optional[int] = contrastive_logits_temperature __UpperCAmelCase : str = feat_quantizer_dropout __UpperCAmelCase : List[Any] = num_negatives __UpperCAmelCase : str = codevector_dim __UpperCAmelCase : Optional[Any] = proj_codevector_dim __UpperCAmelCase : Tuple = diversity_loss_weight # ctc loss __UpperCAmelCase : str = ctc_loss_reduction __UpperCAmelCase : int = ctc_zero_infinity # pretraining loss __UpperCAmelCase : Dict = replace_prob @property def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
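The property closing the record above reduces the conv strides with multiplication; a deobfuscated sketch of what it computes, using the default `conv_stride` values from the record:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the config record above

# Overall downsampling factor of the convolutional feature extractor:
# one frame of hidden states per this many raw waveform samples.
print(functools.reduce(operator.mul, conv_stride, 1))  # 5 * 2**6 == 320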
320
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : int=None , UpperCamelCase : Tuple=None , *UpperCamelCase : str , **UpperCamelCase : List[Any] ): '''simple docstring''' super().__init__(*UpperCamelCase , **UpperCamelCase ) if config is None: assert isinstance(self.model , UpperCamelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) __UpperCAmelCase : str = self.model.config else: __UpperCAmelCase : Optional[Any] = config __UpperCAmelCase : str = data_args __UpperCAmelCase : Tuple = self.config.tgt_vocab_size if isinstance(self.config , UpperCamelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: __UpperCAmelCase : List[str] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCAmelCase : Any = label_smoothed_nll_loss def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : int ): '''simple docstring''' if self.optimizer is None: __UpperCAmelCase : str = ["""bias""", """LayerNorm.weight"""] __UpperCAmelCase : List[str] = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] __UpperCAmelCase : Optional[Any] = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCAmelCase : Dict = Adafactor __UpperCAmelCase : int = {"""scale_parameter""": False, """relative_step""": False} else: __UpperCAmelCase : Union[str, Any] = AdamW __UpperCAmelCase : Optional[Any] = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } __UpperCAmelCase : Dict = self.args.learning_rate if self.sharded_ddp: __UpperCAmelCase : List[Any] = OSS( params=UpperCamelCase , optim=UpperCamelCase , **UpperCamelCase , ) else: __UpperCAmelCase : Optional[Any] = optimizer_cls(UpperCamelCase , **UpperCamelCase ) if self.lr_scheduler is None: __UpperCAmelCase : Optional[Any] = self._get_lr_scheduler(UpperCamelCase ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCAmelCase : int = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCAmelCase : Tuple = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __UpperCAmelCase : Dict = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCamelCase ) return scheduler def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCAmelCase : Union[str, Any] = model(**UpperCamelCase , use_cache=UpperCamelCase )[0] __UpperCAmelCase : str = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __UpperCAmelCase ,__UpperCAmelCase : Any = model(**UpperCamelCase , labels=UpperCamelCase , use_cache=UpperCamelCase )[:2] else: # compute label smoothed loss 
__UpperCAmelCase : str = model(**UpperCamelCase , use_cache=UpperCamelCase )[0] __UpperCAmelCase : List[Any] = torch.nn.functional.log_softmax(UpperCamelCase , dim=-1 ) __UpperCAmelCase ,__UpperCAmelCase : str = self.loss_fn(UpperCamelCase , UpperCamelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = inputs.pop("""labels""" ) __UpperCAmelCase ,__UpperCAmelCase : Tuple = self._compute_loss(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return loss def lowerCamelCase__ ( self : int , UpperCamelCase : nn.Module , UpperCamelCase : Dict[str, Union[torch.Tensor, Any]] , UpperCamelCase : bool , UpperCamelCase : Optional[List[str]] = None , ): '''simple docstring''' __UpperCAmelCase : Tuple = self._prepare_inputs(UpperCamelCase ) __UpperCAmelCase : Optional[int] = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCAmelCase : Optional[int] = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **UpperCamelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase : str = self._pad_tensors_to_max_len(UpperCamelCase , gen_kwargs["""max_length"""] ) __UpperCAmelCase : int = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self._compute_loss(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : int = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCAmelCase : Union[str, Any] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase : Union[str, Any] = self._pad_tensors_to_max_len(UpperCamelCase , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) __UpperCAmelCase : Union[str, Any] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __UpperCAmelCase : Optional[Any] = tensor return padded_tensor
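The trainer above dynamically imports `label_smoothed_nll_loss`; a self-contained sketch of the usual fairseq-style implementation it refers to (an illustrative re-implementation, not the repository's exact helper), returning the (smoothed loss, plain NLL) pair that the calling code unpacks:

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (batch, seq, vocab) log-probabilities; target: (batch, seq) token ids
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    # spread epsilon uniformly over the vocabulary
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
    return loss, nll_loss.sum()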
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase : int = { 'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'], 'configuration_maskformer_swin': ['MaskFormerSwinConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = ['MaskFormerFeatureExtractor'] UpperCAmelCase : Any = ['MaskFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ 'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'MaskFormerForInstanceSegmentation', 'MaskFormerModel', 'MaskFormerPreTrainedModel', ] UpperCAmelCase : List[Any] = [ 'MaskFormerSwinBackbone', 'MaskFormerSwinModel', 'MaskFormerSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
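A small sketch of the shortest-edge resize that the processor above performs for `size={'shortest_edge': 224}` (an illustrative re-implementation of the scaling rule, not the library's `get_resize_output_image_size`):

def shortest_edge_output_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    # scale so the shorter side becomes `shortest_edge`, preserving aspect ratio
    short, long = min(height, width), max(height, width)
    new_short, new_long = shortest_edge, int(shortest_edge * long / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)

print(shortest_edge_output_size(480, 640, 224))  # (224, 298)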
320
1
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' super().tearDown() gc.collect() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : int = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) __UpperCAmelCase : Dict = """A painting of a squirrel eating a burger""" __UpperCAmelCase : Any = jax.device_count() __UpperCAmelCase : List[str] = num_samples * [prompt] __UpperCAmelCase : Tuple = sd_pipe.prepare_inputs(UpperCamelCase ) __UpperCAmelCase : int = replicate(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = shard(UpperCamelCase ) __UpperCAmelCase : List[str] = jax.random.PRNGKey(0 ) __UpperCAmelCase : Tuple = jax.random.split(UpperCamelCase , jax.device_count() ) __UpperCAmelCase : Tuple = sd_pipe(UpperCamelCase , UpperCamelCase , UpperCamelCase , num_inference_steps=25 , jit=UpperCamelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __UpperCAmelCase : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCAmelCase : Optional[int] = images[0, 253:256, 253:256, -1] __UpperCAmelCase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCAmelCase : Optional[int] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = """stabilityai/stable-diffusion-2""" __UpperCAmelCase ,__UpperCAmelCase : Any = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase , subfolder="""scheduler""" ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained( UpperCamelCase , scheduler=UpperCamelCase , revision="""bf16""" , dtype=jnp.bfloataa , ) __UpperCAmelCase : List[Any] = scheduler_params __UpperCAmelCase : Optional[int] = """A painting of a squirrel eating a burger""" __UpperCAmelCase : Tuple = jax.device_count() __UpperCAmelCase : Optional[int] = num_samples * [prompt] __UpperCAmelCase : Optional[int] = sd_pipe.prepare_inputs(UpperCamelCase ) __UpperCAmelCase : List[Any] = replicate(UpperCamelCase ) __UpperCAmelCase : str = shard(UpperCamelCase ) __UpperCAmelCase : str = jax.random.PRNGKey(0 ) __UpperCAmelCase : Tuple = jax.random.split(UpperCamelCase , jax.device_count() ) __UpperCAmelCase : Any = sd_pipe(UpperCamelCase , UpperCamelCase , UpperCamelCase , num_inference_steps=25 , jit=UpperCamelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __UpperCAmelCase : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCAmelCase : Dict = images[0, 253:256, 253:256, -1] __UpperCAmelCase : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCAmelCase : List[Any] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(f'''output_slice: {output_slice}''' ) assert 
jnp.abs(output_slice - expected_slice ).max() < 1e-2
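The tests above use the replicate/shard idiom before `pmap`-style inference; a rough sketch of what sharding does to the leading batch axis (shapes here are illustrative assumptions):

import jax
import numpy as np

n_dev = jax.device_count()
prompt_ids = np.zeros((n_dev, 77), dtype=np.int32)  # hypothetical token ids, one prompt per device

# flax's `shard` is roughly a reshape that puts a device axis in front
sharded = prompt_ids.reshape((n_dev, -1) + prompt_ids.shape[1:])
print(sharded.shape)  # (n_dev, 1, 77)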
320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
1
"""simple docstring""" from __future__ import annotations import numpy as np def lowerCamelCase ( _UpperCamelCase : list[float] ) -> Optional[Any]: '''simple docstring''' return np.maximum(0 , _UpperCamelCase ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
320
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
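The cache tests above construct decoder position ids by broadcasting an `arange` across the batch; a standalone sketch of that single step (shapes are illustrative):

import jax.numpy as jnp

batch_size, seq_len = 2, 7
position_ids = jnp.broadcast_to(jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1))
print(position_ids.shape)  # (2, 6)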
320
1
"""simple docstring""" import math def lowerCamelCase ( ) -> None: '''simple docstring''' __UpperCAmelCase : Any = input("""Enter message: """ ) __UpperCAmelCase : int = int(input(f'''Enter key [2-{len(_UpperCamelCase ) - 1}]: ''' ) ) __UpperCAmelCase : Any = input("""Encryption/Decryption [e/d]: """ ) if mode.lower().startswith("""e""" ): __UpperCAmelCase : str = encrypt_message(_UpperCamelCase , _UpperCamelCase ) elif mode.lower().startswith("""d""" ): __UpperCAmelCase : Tuple = decrypt_message(_UpperCamelCase , _UpperCamelCase ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f'''Output:\n{text + "|"}''' ) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : str ) -> str: '''simple docstring''' __UpperCAmelCase : str = [""""""] * key for col in range(_UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = col while pointer < len(_UpperCamelCase ): cipher_text[col] += message[pointer] pointer += key return "".join(_UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : str ) -> str: '''simple docstring''' __UpperCAmelCase : Any = math.ceil(len(_UpperCamelCase ) / key ) __UpperCAmelCase : Union[str, Any] = key __UpperCAmelCase : Optional[Any] = (num_cols * num_rows) - len(_UpperCamelCase ) __UpperCAmelCase : int = [""""""] * num_cols __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): __UpperCAmelCase : Optional[Any] = 0 row += 1 return "".join(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
320
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
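Once the script above has run with `--save_model`, the converted checkpoint can be loaded back through the standard API; a usage sketch (the folder name follows the `--pytorch_dump_folder_path` default):

from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor

model = EfficientNetForImageClassification.from_pretrained("hf_model")
processor = EfficientNetImageProcessor.from_pretrained("hf_model")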
320
1
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge UpperCAmelCase : Tuple = [ 'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the' ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe' ' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.', 'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal' ' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s' ' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the' ' body.', 'Amnesty International releases its annual report on the death penalty. The report catalogs the use of' ' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the' ' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital' ' punishment.', ] UpperCAmelCase : Optional[Any] = [ 'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .' ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz' ' had informed his Lufthansa training school of an episode of severe depression, airline says .', 'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .' ' Israel and the United States opposed the move, which could open the door to war crimes investigations against' ' Israelis .', 'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to' ' death . Organization claims that governments around the world are using the threat of terrorism to advance' ' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death' ' sentences up by 28% .', ] def lowerCamelCase ( ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=["""rouge2""", """rougeL"""] ) assert isinstance(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : int = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=["""rouge2"""] ) assert ( pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean() ) def lowerCamelCase ( ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = """rougeLsum""" __UpperCAmelCase : Optional[Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k] __UpperCAmelCase : Any = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCamelCase ( ) -> Any: '''simple docstring''' __UpperCAmelCase : List[str] = ["""rouge1""", """rouge2""", """rougeL"""] __UpperCAmelCase : Optional[Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase ) __UpperCAmelCase : List[str] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase ) assert score_sep == score_no_sep def lowerCamelCase ( ) -> int: '''simple docstring''' __UpperCAmelCase : str = [ """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""", """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""", ] __UpperCAmelCase : int = [ """Margot Frank, died in 1945, a month earlier than previously thought.""", """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of""" """ the final seconds on board Flight 9525.""", ] assert calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase ) == calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase ) def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [ """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """ ] __UpperCAmelCase : str = [ """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . 
Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .""" ] __UpperCAmelCase : Dict = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=["""rougeLsum"""] , newline_sep=_UpperCamelCase )["""rougeLsum"""] __UpperCAmelCase : Union[str, Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""] assert new_score > prev_score def lowerCamelCase ( ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" ) __UpperCAmelCase : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) ) assert isinstance(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : int = calculate_rouge_path( data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=_UpperCamelCase ) assert isinstance(_UpperCamelCase , _UpperCamelCase )
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int: '''simple docstring''' def update_area_of_max_square(_UpperCamelCase : int , _UpperCamelCase : int ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 __UpperCAmelCase : Optional[int] = update_area_of_max_square(_UpperCamelCase , col + 1 ) __UpperCAmelCase : int = update_area_of_max_square(row + 1 , col + 1 ) __UpperCAmelCase : Optional[Any] = update_area_of_max_square(row + 1 , _UpperCamelCase ) if mat[row][col]: __UpperCAmelCase : List[Any] = 1 + min([right, diagonal, down] ) __UpperCAmelCase : Any = max(largest_square_area[0] , _UpperCamelCase ) return sub_problem_sol else: return 0 __UpperCAmelCase : Optional[int] = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int: '''simple docstring''' def update_area_of_max_square_using_dp_array( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] __UpperCAmelCase : List[str] = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase ) __UpperCAmelCase : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase ) __UpperCAmelCase : str = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase ) if mat[row][col]: __UpperCAmelCase : List[Any] = 1 + min([right, diagonal, down] ) __UpperCAmelCase : Optional[Any] = max(largest_square_area[0] , _UpperCamelCase ) __UpperCAmelCase : Tuple = sub_problem_sol return sub_problem_sol else: return 0 __UpperCAmelCase : Dict = [0] __UpperCAmelCase : Any = [[-1] * cols for _ in range(_UpperCamelCase )] update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase ) return largest_square_area[0] def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int: '''simple docstring''' __UpperCAmelCase : List[Any] = [[0] * (cols + 1) for _ in range(rows + 1 )] __UpperCAmelCase : Any = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __UpperCAmelCase : Union[str, Any] = dp_array[row][col + 1] __UpperCAmelCase : Optional[int] = dp_array[row + 1][col + 1] __UpperCAmelCase : Optional[Any] = dp_array[row + 1][col] if mat[row][col] == 1: __UpperCAmelCase : Optional[int] = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Optional[Any] = max(dp_array[row][col] , _UpperCamelCase ) else: __UpperCAmelCase : int = 0 return largest_square_area def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int: '''simple docstring''' __UpperCAmelCase : int = [0] * (cols + 1) __UpperCAmelCase : Tuple = [0] * (cols + 1) __UpperCAmelCase : int = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __UpperCAmelCase : int = current_row[col + 1] __UpperCAmelCase : List[Any] = next_row[col + 1] __UpperCAmelCase : str = next_row[col] if mat[row][col] == 1: __UpperCAmelCase : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = max(current_row[col] , _UpperCamelCase ) else: __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = current_row return largest_square_area if __name__ == "__main__": import doctest 
doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
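All four variants above solve the classic "largest all-ones square" DP; the two-row version at the end needs only O(cols) memory. A compact, self-contained sketch of that space-optimised recurrence (names such as largest_square_side are assumed; the obfuscated snippet hides its real identifiers):

def largest_square_side(rows: int, cols: int, mat: list[list[int]]) -> int:
    next_row = [0] * (cols + 1)
    best = 0
    for row in range(rows - 1, -1, -1):
        current_row = [0] * (cols + 1)
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                # a square anchored at (row, col) extends the smallest of its
                # right, diagonal, and down neighbours by one cell
                current_row[col] = 1 + min(current_row[col + 1], next_row[col + 1], next_row[col])
                best = max(best, current_row[col])
        next_row = current_row
    return best

assert largest_square_side(2, 2, [[1, 1], [1, 1]]) == 2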
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """OwlViTImageProcessor""" __a = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self : List[Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=None , **UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCamelCase , ) __UpperCAmelCase : Optional[int] = kwargs.pop("""feature_extractor""" ) __UpperCAmelCase : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self : Union[str, Any] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple="max_length" , UpperCamelCase : str="np" , **UpperCamelCase : List[Any] ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )): __UpperCAmelCase : Tuple = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )] elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ): __UpperCAmelCase : Optional[int] = [] # Maximum number of queries across batch __UpperCAmelCase : List[str] = max([len(UpperCamelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCamelCase ) != max_num_queries: __UpperCAmelCase : Optional[int] = t + [""" """] * (max_num_queries - len(UpperCamelCase )) __UpperCAmelCase : Tuple = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) encodings.append(UpperCamelCase ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": __UpperCAmelCase : int = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __UpperCAmelCase : Tuple = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCAmelCase : Any = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __UpperCAmelCase : List[str] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCAmelCase : List[Any] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) __UpperCAmelCase : Tuple = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == 
"tf" and is_tf_available(): import tensorflow as tf __UpperCAmelCase : Any = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __UpperCAmelCase : int = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) __UpperCAmelCase : int = BatchEncoding() __UpperCAmelCase : Optional[int] = input_ids __UpperCAmelCase : Dict = attention_mask if query_images is not None: __UpperCAmelCase : Tuple = BatchEncoding() __UpperCAmelCase : Tuple = self.image_processor( UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values __UpperCAmelCase : List[str] = query_pixel_values if images is not None: __UpperCAmelCase : str = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : Optional[Any] = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCAmelCase : Dict = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : str ): '''simple docstring''' return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , *UpperCamelCase : Tuple , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : int , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase , ) return self.image_processor_class @property def lowerCamelCase__ ( self : Any ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCamelCase , ) return self.image_processor
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def lowerCamelCase ( _UpperCamelCase : int = 1_0_0 ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = 1 __UpperCAmelCase : Union[str, Any] = 2 for i in range(2 , max_n + 1 ): __UpperCAmelCase : int = pre_numerator __UpperCAmelCase : Dict = 2 * i // 3 if i % 3 == 0 else 1 __UpperCAmelCase : Tuple = cur_numerator __UpperCAmelCase : List[Any] = e_cont * pre_numerator + temp return sum_digits(_UpperCamelCase ) if __name__ == "__main__": print(F"{solution() = }")
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Any=7 , UpperCamelCase : List[str]=3 , UpperCamelCase : str=30 , UpperCamelCase : str=400 , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , UpperCamelCase : Tuple=True , UpperCamelCase : int=1 / 255 , UpperCamelCase : Union[str, Any]=True , ): '''simple docstring''' __UpperCAmelCase : Dict = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333} __UpperCAmelCase : Dict = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : Optional[Any] = min_resolution __UpperCAmelCase : List[Any] = max_resolution __UpperCAmelCase : Tuple = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : List[str] = do_normalize __UpperCAmelCase : List[str] = image_mean __UpperCAmelCase : Any = image_std __UpperCAmelCase : Union[str, Any] = do_rescale __UpperCAmelCase : Optional[int] = rescale_factor __UpperCAmelCase : Dict = do_pad def lowerCamelCase__ ( self : int ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : List[Any]=False ): '''simple docstring''' if not batched: __UpperCAmelCase : Optional[Any] = image_inputs[0] if isinstance(UpperCamelCase , Image.Image ): __UpperCAmelCase ,__UpperCAmelCase : int = image.size else: __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = image.shape[1], image.shape[2] if w < h: __UpperCAmelCase : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) __UpperCAmelCase : Optional[int] = self.size["""shortest_edge"""] elif w > h: __UpperCAmelCase : Union[str, Any] = self.size["""shortest_edge"""] __UpperCAmelCase : List[str] = int(self.size["""shortest_edge"""] * w / h ) else: __UpperCAmelCase : Optional[int] = self.size["""shortest_edge"""] __UpperCAmelCase : Any = self.size["""shortest_edge"""] else: __UpperCAmelCase : Tuple = [] for image in image_inputs: __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCAmelCase : Any = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0] __UpperCAmelCase : List[str] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = YolosImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = YolosImageProcessingTester(self ) @property def lowerCamelCase__ ( self : 
Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """size""" ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} ) self.assertEqual(image_processor.do_pad , UpperCamelCase ) __UpperCAmelCase : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' pass def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase ,__UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase ,__UpperCAmelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) __UpperCAmelCase : str = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input __UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Tuple = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input __UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Union[str, Any] = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) __UpperCAmelCase : List[Any] = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase ) # create random PyTorch tensors __UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors __UpperCAmelCase : Tuple = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[int] = image_processing_a(UpperCamelCase , return_tensors="""pt""" ) self.assertTrue( torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __UpperCAmelCase : str = json.loads(f.read() ) __UpperCAmelCase : Optional[Any] = {"""image_id""": 39_769, """annotations""": target} # encode them __UpperCAmelCase : int = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" ) __UpperCAmelCase : Optional[Any] = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __UpperCAmelCase : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase ) __UpperCAmelCase : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) ) # verify area __UpperCAmelCase : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) ) # verify boxes __UpperCAmelCase : Optional[int] = torch.Size([6, 4] ) 
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) ) # verify image_id __UpperCAmelCase : Any = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) ) # verify is_crowd __UpperCAmelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) ) # verify class_labels __UpperCAmelCase : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) ) # verify orig_size __UpperCAmelCase : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) ) # verify size __UpperCAmelCase : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __UpperCAmelCase : Optional[Any] = json.loads(f.read() ) __UpperCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target} __UpperCAmelCase : Optional[int] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __UpperCAmelCase : Optional[int] = YolosImageProcessor(format="""coco_panoptic""" ) __UpperCAmelCase : str = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __UpperCAmelCase : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase ) __UpperCAmelCase : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) ) # verify area __UpperCAmelCase : Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) ) # verify boxes __UpperCAmelCase : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase ) __UpperCAmelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) ) # verify image_id __UpperCAmelCase : List[Any] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) ) # verify is_crowd __UpperCAmelCase : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) ) # verify class_labels __UpperCAmelCase : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) ) # verify masks __UpperCAmelCase : List[Any] = 822_873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase ) # verify orig_size __UpperCAmelCase : 
Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) ) # verify size __UpperCAmelCase : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : Dict = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
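The second snippet in the row above implements multiplication (and its modulo-c variant) by binary doubling: walk the bits of b, adding the current doubled a whenever a bit is set. A minimal self-contained sketch of that double-and-add technique; the descriptive names below are illustrative, not the snippet's own (its identifiers are deliberately renamed):

def double_and_add(a: int, b: int) -> int:
    # multiply a * b using only addition, doubling, and bit shifts
    res = 0
    while b > 0:
        if b & 1:  # lowest bit of b is set: add the current multiple of a
            res += a
        a += a  # double a
        b >>= 1  # advance to the next bit of b
    return res

assert double_and_add(13, 7) == 91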
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[int] = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Dict = n - k # Calculate C(n,k) for i in range(_UpperCamelCase ): result *= n - i result //= i + 1 return result def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' return binomial_coefficient(2 * node_count , _UpperCamelCase ) // (node_count + 1) def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' if n < 0: raise ValueError("""factorial() not defined for negative values""" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' return catalan_number(_UpperCamelCase ) * factorial(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Tuple = int(input('Enter the number of nodes: ').strip() or 0) if node_count <= 0: raise ValueError('We need some nodes to work with.') print( F"Given {node_count} nodes, there are {binary_tree_count(node_count)} " F"binary trees and {catalan_number(node_count)} binary search trees." )
320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
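The first snippet in the row above computes Catalan numbers from the identity catalan(n) = C(2n, n) // (n + 1) and counts labeled binary trees as catalan(n) * n!. A short sketch of the same formulas, assuming the pre-renaming function names that the snippet's own f-strings still reference (catalan_number, binary_tree_count):

from math import comb, factorial

def catalan_number(node_count: int) -> int:
    # number of distinct binary search trees on node_count nodes
    return comb(2 * node_count, node_count) // (node_count + 1)

def binary_tree_count(node_count: int) -> int:
    # labeled binary trees: each tree shape times the orderings of its nodes
    return catalan_number(node_count) * factorial(node_count)

assert catalan_number(3) == 5
assert binary_tree_count(3) == 30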
"""simple docstring""" import math def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float ) -> float: '''simple docstring''' if ( not isinstance(_UpperCamelCase , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("""power_factor must be a valid float value between -1 and 1.""" ) return apparent_power * power_factor def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float ) -> float: '''simple docstring''' if ( not isinstance(_UpperCamelCase , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("""power_factor must be a valid float value between -1 and 1.""" ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
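The power-triangle snippet in the row above encodes P = S * pf for real power and Q = S * sqrt(1 - pf^2) for reactive power. A small usage sketch with illustrative names (the snippet's own parameters are renamed):

import math

def real_power(apparent_power: float, power_factor: float) -> float:
    return apparent_power * power_factor

def reactive_power(apparent_power: float, power_factor: float) -> float:
    return apparent_power * math.sqrt(1 - power_factor**2)

# a 100 VA load at power factor 0.8 splits into 80 W real and 60 VAR reactive
assert real_power(100, 0.8) == 80.0
assert round(reactive_power(100, 0.8), 9) == 60.0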
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def lowerCamelCase ( _UpperCamelCase : int ) -> Any: '''simple docstring''' __UpperCAmelCase : List[str] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __UpperCAmelCase : Optional[int] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __UpperCAmelCase : str = 4 __UpperCAmelCase : Optional[Any] = 4_8 __UpperCAmelCase : List[str] = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __UpperCAmelCase : List[Any] = [6, 6, 6, 6] __UpperCAmelCase : Dict = 6_0 __UpperCAmelCase : Optional[int] = [6, 6, 6, 6] __UpperCAmelCase : Any = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __UpperCAmelCase : Optional[Any] = 4 __UpperCAmelCase : str = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : List[str] = 1 __UpperCAmelCase : List[str] = 1_2_6 __UpperCAmelCase : Tuple = 7 __UpperCAmelCase : Optional[Any] = 255.0 __UpperCAmelCase : str = """""" return config def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : str ) -> str: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: __UpperCAmelCase : Optional[int] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __UpperCAmelCase : List[Any] = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: __UpperCAmelCase : List[Any] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: __UpperCAmelCase : Tuple = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: __UpperCAmelCase : Tuple = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __UpperCAmelCase : Optional[int] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __UpperCAmelCase : str = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __UpperCAmelCase : List[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __UpperCAmelCase : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __UpperCAmelCase : str = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: __UpperCAmelCase : Dict = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: __UpperCAmelCase : int = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: __UpperCAmelCase : List[str] = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: __UpperCAmelCase : Optional[int] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": __UpperCAmelCase : Optional[int] = """layernorm.weight""" if name == "norm.bias": __UpperCAmelCase : Tuple = """layernorm.bias""" if "conv_first" in name: __UpperCAmelCase : Optional[Any] = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if 
"conv_last" in name: __UpperCAmelCase : str = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __UpperCAmelCase : List[str] = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: __UpperCAmelCase : Tuple = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: __UpperCAmelCase : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) __UpperCAmelCase : Optional[Any] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": __UpperCAmelCase : Dict = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) __UpperCAmelCase : Union[str, Any] = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: __UpperCAmelCase : Union[str, Any] = """swin2sr.""" + name return name def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Any = orig_state_dict.pop(_UpperCamelCase ) if "qkv" in key: __UpperCAmelCase : Optional[int] = key.split(""".""" ) __UpperCAmelCase : Optional[int] = int(key_split[1] ) __UpperCAmelCase : str = int(key_split[4] ) __UpperCAmelCase : Any = config.embed_dim if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Any = val[dim : dim * 2, :] __UpperCAmelCase : Dict = val[-dim:, :] else: __UpperCAmelCase : List[Any] = val[:dim] __UpperCAmelCase : Dict = val[dim : dim * 2] __UpperCAmelCase : Union[str, Any] = val[-dim:] pass else: __UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Dict ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = get_config(_UpperCamelCase ) __UpperCAmelCase : int = SwinaSRForImageSuperResolution(_UpperCamelCase ) model.eval() __UpperCAmelCase : Any = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="""cpu""" ) __UpperCAmelCase : int = convert_state_dict(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase ) if len(_UpperCamelCase ) > 0: raise ValueError("""Missing keys when converting: {}""".format(_UpperCamelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values __UpperCAmelCase : Optional[Any] = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" __UpperCAmelCase : Union[str, Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert("""RGB""" ) __UpperCAmelCase : Union[str, Any] = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __UpperCAmelCase : List[Any] = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6 __UpperCAmelCase : int = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __UpperCAmelCase : Tuple = transforms(_UpperCamelCase ).unsqueeze(0 ) if config.num_channels == 1: __UpperCAmelCase : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) __UpperCAmelCase : List[str] = model(_UpperCamelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in 
checkpoint_url: __UpperCAmelCase : int = torch.Size([1, 3, 5_1_2, 5_1_2] ) __UpperCAmelCase : str = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __UpperCAmelCase : Union[str, Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) __UpperCAmelCase : str = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here __UpperCAmelCase : Optional[int] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) __UpperCAmelCase : str = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __UpperCAmelCase : List[str] = torch.Size([1, 3, 5_1_2, 5_1_2] ) __UpperCAmelCase : int = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __UpperCAmelCase : Tuple = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) __UpperCAmelCase : Dict = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCamelCase , atol=1E-3 ) print("""Looks ok!""" ) __UpperCAmelCase : Union[str, Any] = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } __UpperCAmelCase : Optional[Any] = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_UpperCamelCase ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth', type=str, help='URL of the original Swin2SR checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.') UpperCAmelCase : Optional[Any] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
320
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
"""simple docstring""" import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=13 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Dict=True , UpperCamelCase : List[str]=True , UpperCamelCase : int=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Union[str, Any]=224 , UpperCamelCase : List[str]=1_000 , UpperCamelCase : Optional[Any]=[3, 3, 6, 4] , UpperCamelCase : str=[48, 56, 112, 220] , ): '''simple docstring''' __UpperCAmelCase : Dict = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Optional[Any] = num_channels __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = hidden_dropout_prob __UpperCAmelCase : Dict = attention_probs_dropout_prob __UpperCAmelCase : Dict = num_labels __UpperCAmelCase : int = image_size __UpperCAmelCase : str = layer_depths __UpperCAmelCase : Tuple = embed_dims def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Any = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : str ): '''simple docstring''' return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCamelCase , layer_scale_init_value=1e-5 , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Tuple = SwiftFormerModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Dict = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.num_labels __UpperCAmelCase : Any = SwiftFormerForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : List[str] = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __UpperCAmelCase : Any = SwiftFormerForImageClassification(UpperCamelCase ) 
model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' ((__UpperCAmelCase) ,(__UpperCAmelCase) ,(__UpperCAmelCase)) : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () __a = ( {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification} if is_torch_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = SwiftFormerModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester( self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def lowerCamelCase__ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase ) __UpperCAmelCase : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Any = model_class(UpperCamelCase ) __UpperCAmelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Tuple = [*signature.parameters.keys()] __UpperCAmelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[int] = SwiftFormerModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' pass def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' def check_hidden_states_output(UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Optional[int] ): __UpperCAmelCase : Any = model_class(UpperCamelCase ) model.to(UpperCamelCase ) 
model.eval() with torch.no_grad(): __UpperCAmelCase : Any = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : int = outputs.hidden_states __UpperCAmelCase : int = 8 self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(UpperCamelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Tuple = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Dict = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' def _config_zero_init(UpperCamelCase : str ): __UpperCAmelCase : Optional[int] = copy.deepcopy(UpperCamelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(UpperCamelCase , UpperCamelCase , 1e-1_0 ) if isinstance(getattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , UpperCamelCase ): __UpperCAmelCase : List[Any] = _config_zero_init(getattr(UpperCamelCase , UpperCamelCase ) ) setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return configs_no_init __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Dict = _config_zero_init(UpperCamelCase ) for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = model_class(config=UpperCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' pass def lowerCamelCase ( ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[str] = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(UpperCamelCase ) __UpperCAmelCase : Any = self.default_image_processor __UpperCAmelCase : Optional[int] = prepare_img() __UpperCAmelCase : Dict = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Dict = model(**UpperCamelCase ) # verify the logits __UpperCAmelCase : Dict = torch.Size((1, 1_000) ) 
self.assertEqual(outputs.logits.shape , UpperCamelCase ) __UpperCAmelCase : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
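The SwiftFormer test in the row above checks weight initialization by forcing every *_range/*_std/layer_scale config field to ~0 and asserting that each trainable parameter's mean rounds to 0.0 or 1.0. A stripped-down sketch of that check against a deterministic module; this illustrates the idea only and is not the test's actual harness:

from torch import nn

def check_zero_initialization(model: nn.Module) -> None:
    # with initializer ranges forced to ~0, weights collapse to 0 and norm scales stay 1
    for name, param in model.named_parameters():
        if param.requires_grad:
            mean = param.data.mean().round().item()
            assert mean in (0.0, 1.0), f"Parameter {name} seems not properly initialized"

check_zero_initialization(nn.LayerNorm(4))  # weight is all ones, bias all zeros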
"""simple docstring""" import argparse UpperCAmelCase : Optional[int] = 'docs/source/_static/js/custom.js' def lowerCamelCase ( _UpperCamelCase : str ) -> Union[str, Any]: '''simple docstring''' with open(_UpperCamelCase , encoding="""utf-8""" , newline="""\n""" ) as f: __UpperCAmelCase : Optional[Any] = f.readlines() __UpperCAmelCase : str = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 __UpperCAmelCase : int = f'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += f''' "v{version}": "v{version}",\n''' with open(_UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : int = argparse.ArgumentParser() parser.add_argument('--version', help='Release version.') UpperCAmelCase : Tuple = parser.parse_args() update_custom_js(args.version)
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Dict = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class lowerCamelCase__ ( A ): """simple docstring""" __a = """wav2vec2""" def __init__( self : Dict , UpperCamelCase : Tuple=32 , UpperCamelCase : Optional[Any]=768 , UpperCamelCase : Any=12 , UpperCamelCase : int=12 , UpperCamelCase : Dict=3_072 , UpperCamelCase : str="gelu" , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Any=0.0 , UpperCamelCase : int=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : str=0.02 , UpperCamelCase : List[Any]=1e-5 , UpperCamelCase : int="group" , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : List[Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase : Tuple=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase : Any=False , UpperCamelCase : str=128 , UpperCamelCase : Dict=16 , UpperCamelCase : List[str]=False , UpperCamelCase : List[Any]=True , UpperCamelCase : Dict=0.05 , UpperCamelCase : str=10 , UpperCamelCase : Tuple=2 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Optional[Any]=10 , UpperCamelCase : List[str]=0 , UpperCamelCase : Dict=320 , UpperCamelCase : Any=2 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=100 , UpperCamelCase : Dict=256 , UpperCamelCase : int=256 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Optional[Any]="sum" , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=256 , UpperCamelCase : str=(512, 512, 512, 512, 1_500) , UpperCamelCase : Any=(5, 3, 3, 1, 1) , UpperCamelCase : str=(1, 2, 3, 1, 1) , UpperCamelCase : List[Any]=512 , UpperCamelCase : List[str]=0 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : int=2 , UpperCamelCase : Dict=False , UpperCamelCase : int=3 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : Dict=3 , UpperCamelCase : str=None , UpperCamelCase : List[str]=None , **UpperCamelCase : List[str] , ): '''simple docstring''' super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase ) __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : List[str] = feat_extract_norm __UpperCAmelCase : List[str] = feat_extract_activation __UpperCAmelCase : Union[str, Any] = list(UpperCamelCase ) __UpperCAmelCase : Dict = list(UpperCamelCase ) __UpperCAmelCase : Any = list(UpperCamelCase ) __UpperCAmelCase : Optional[int] = conv_bias __UpperCAmelCase : Optional[int] = num_conv_pos_embeddings __UpperCAmelCase : List[str] = num_conv_pos_embedding_groups __UpperCAmelCase : str = len(self.conv_dim ) __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : int = num_attention_heads __UpperCAmelCase : Any = hidden_dropout __UpperCAmelCase : Any = attention_dropout __UpperCAmelCase : Tuple = activation_dropout __UpperCAmelCase : Tuple = feat_proj_dropout __UpperCAmelCase : Tuple = final_dropout __UpperCAmelCase : Optional[int] = layerdrop __UpperCAmelCase : List[str] = layer_norm_eps __UpperCAmelCase : Any = initializer_range 
__UpperCAmelCase : int = vocab_size __UpperCAmelCase : int = do_stable_layer_norm __UpperCAmelCase : List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __UpperCAmelCase : Tuple = apply_spec_augment __UpperCAmelCase : List[Any] = mask_time_prob __UpperCAmelCase : Dict = mask_time_length __UpperCAmelCase : List[Any] = mask_time_min_masks __UpperCAmelCase : List[str] = mask_feature_prob __UpperCAmelCase : str = mask_feature_length __UpperCAmelCase : Optional[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __UpperCAmelCase : Optional[Any] = num_codevectors_per_group __UpperCAmelCase : Tuple = num_codevector_groups __UpperCAmelCase : Union[str, Any] = contrastive_logits_temperature __UpperCAmelCase : Tuple = feat_quantizer_dropout __UpperCAmelCase : Optional[Any] = num_negatives __UpperCAmelCase : Tuple = codevector_dim __UpperCAmelCase : Tuple = proj_codevector_dim __UpperCAmelCase : str = diversity_loss_weight # ctc loss __UpperCAmelCase : int = ctc_loss_reduction __UpperCAmelCase : Dict = ctc_zero_infinity # adapter __UpperCAmelCase : List[str] = add_adapter __UpperCAmelCase : Union[str, Any] = adapter_kernel_size __UpperCAmelCase : Optional[Any] = adapter_stride __UpperCAmelCase : str = num_adapter_layers __UpperCAmelCase : List[str] = output_hidden_size or hidden_size __UpperCAmelCase : Tuple = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. __UpperCAmelCase : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __UpperCAmelCase : int = list(UpperCamelCase ) __UpperCAmelCase : int = list(UpperCamelCase ) __UpperCAmelCase : Optional[int] = list(UpperCamelCase ) __UpperCAmelCase : int = xvector_output_dim @property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
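The Champernowne snippet in the row above concatenates 1, 2, 3, ... into a digit stream of at least 10^6 digits and multiplies the digits at the 1st, 10th, ..., 1,000,000th positions (Project Euler 40). A compact equivalent sketch with illustrative names:

def champernowne_product(positions=(1, 10, 100, 1_000, 10_000, 100_000, 1_000_000)) -> int:
    parts, total, i = [], 0, 1
    while total < max(positions):  # build just enough of the digit stream
        part = str(i)
        parts.append(part)
        total += len(part)
        i += 1
    stream = "".join(parts)
    product = 1
    for p in positions:
        product *= int(stream[p - 1])  # positions are 1-indexed
    return product

assert champernowne_product() == 210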
"""simple docstring""" import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging UpperCAmelCase : str = logging.get_logger(__name__) def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = nn.functional.normalize(_UpperCamelCase ) __UpperCAmelCase : str = nn.functional.normalize(_UpperCamelCase ) return torch.mm(_UpperCamelCase , normalized_text_embeds.t() ) class lowerCamelCase__ ( A ): """simple docstring""" __a = CLIPConfig __a = ["""CLIPEncoderLayer"""] def __init__( self : Dict , UpperCamelCase : CLIPConfig ): '''simple docstring''' super().__init__(UpperCamelCase ) __UpperCAmelCase : Optional[int] = CLIPVisionModel(config.vision_config ) __UpperCAmelCase : List[Any] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=UpperCamelCase ) __UpperCAmelCase : List[Any] = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=UpperCamelCase ) __UpperCAmelCase : str = nn.Parameter(torch.ones(17 ) , requires_grad=UpperCamelCase ) __UpperCAmelCase : Tuple = nn.Parameter(torch.ones(3 ) , requires_grad=UpperCamelCase ) @torch.no_grad() def lowerCamelCase__ ( self : str , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = self.vision_model(UpperCamelCase )[1] # pooled_output __UpperCAmelCase : Optional[int] = self.visual_projection(UpperCamelCase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCAmelCase : Dict = cosine_distance(UpperCamelCase , self.special_care_embeds ).cpu().float().numpy() __UpperCAmelCase : str = cosine_distance(UpperCamelCase , self.concept_embeds ).cpu().float().numpy() __UpperCAmelCase : str = [] __UpperCAmelCase : str = image_embeds.shape[0] for i in range(UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images __UpperCAmelCase : Dict = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): __UpperCAmelCase : List[Any] = special_cos_dist[i][concept_idx] __UpperCAmelCase : int = self.special_care_embeds_weights[concept_idx].item() __UpperCAmelCase : Tuple = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} ) __UpperCAmelCase : Optional[int] = 0.01 for concept_idx in range(len(cos_dist[0] ) ): __UpperCAmelCase : str = cos_dist[i][concept_idx] __UpperCAmelCase : Optional[Any] = self.concept_embeds_weights[concept_idx].item() __UpperCAmelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(UpperCamelCase ) result.append(UpperCamelCase ) __UpperCAmelCase : str = [len(res["""bad_concepts"""] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowerCamelCase__ ( self : List[str] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : torch.FloatTensor ): '''simple docstring''' __UpperCAmelCase : Dict = 
self.vision_model(UpperCamelCase )[1] # pooled_output __UpperCAmelCase : int = self.visual_projection(UpperCamelCase ) __UpperCAmelCase : int = cosine_distance(UpperCamelCase , self.special_care_embeds ) __UpperCAmelCase : Any = cosine_distance(UpperCamelCase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images __UpperCAmelCase : Tuple = 0.0 __UpperCAmelCase : Any = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) __UpperCAmelCase : List[Any] = torch.any(special_scores > 0 , dim=1 ) __UpperCAmelCase : List[Any] = special_care * 0.01 __UpperCAmelCase : Dict = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) __UpperCAmelCase : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) __UpperCAmelCase : List[Any] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Tuple=100 , UpperCamelCase : List[Any]=13 , UpperCamelCase : List[str]=30 , UpperCamelCase : List[str]=2 , UpperCamelCase : Dict=3 , UpperCamelCase : List[str]=True , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : Optional[int]=37 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : int=10 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : List[str]=3 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : List[Any] = batch_size __UpperCAmelCase : Dict = image_size __UpperCAmelCase : Tuple = patch_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Any = is_training __UpperCAmelCase : Optional[int] = use_labels __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : int = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCAmelCase : Any = (image_size // patch_size) ** 2 __UpperCAmelCase : Tuple = num_patches + 1 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Optional[int] = None if self.use_labels: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Optional[Any] = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def lowerCamelCase__ ( self : Any , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxBeitModel(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : int = FlaxBeitForMaskedImageModeling(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Dict = self.type_sequence_label_size __UpperCAmelCase : str = FlaxBeitForImageClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[int] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Optional[int] = FlaxBeitForImageClassification(UpperCamelCase ) __UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : int = model(UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : List[str] = config_and_inputs __UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[str] = FlaxBeitModelTester(self ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Tuple = model_class(UpperCamelCase ) __UpperCAmelCase : Any = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Dict = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase ) @jax.jit def model_jitted(UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ): return model(pixel_values=UpperCamelCase , **UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : str = model_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Any = model_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): 
self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def lowerCamelCase__ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Tuple = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" ) __UpperCAmelCase : Dict = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(UpperCamelCase ) def lowerCamelCase ( ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @require_flax class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ) __UpperCAmelCase : Optional[int] = self.default_image_processor __UpperCAmelCase : Tuple = prepare_img() __UpperCAmelCase : str = image_processor(images=UpperCamelCase , return_tensors="""np""" ).pixel_values # prepare bool_masked_pos __UpperCAmelCase : Union[str, Any] = np.ones((1, 196) , dtype=UpperCamelCase ) # forward pass __UpperCAmelCase : Tuple = model(pixel_values=UpperCamelCase , bool_masked_pos=UpperCamelCase ) __UpperCAmelCase : Dict = outputs.logits # verify the logits __UpperCAmelCase : Any = (1, 196, 8_192) self.assertEqual(logits.shape , UpperCamelCase ) __UpperCAmelCase : Any = np.array( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase , atol=1e-2 ) ) @slow def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ) __UpperCAmelCase : int = self.default_image_processor __UpperCAmelCase : Dict = prepare_img() __UpperCAmelCase : List[Any] = image_processor(images=UpperCamelCase , return_tensors="""np""" ) # forward pass __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : Tuple = outputs.logits # verify the logits __UpperCAmelCase : Optional[int] = (1, 1_000) self.assertEqual(logits.shape , UpperCamelCase ) __UpperCAmelCase : str = np.array([-1.2385, -1.0987, -1.0108] ) self.assertTrue(np.allclose(logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) __UpperCAmelCase : int = 281 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase ) @slow def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : str = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ) __UpperCAmelCase : Dict = self.default_image_processor 
__UpperCAmelCase : Dict = prepare_img() __UpperCAmelCase : List[Any] = image_processor(images=UpperCamelCase , return_tensors="""np""" ) # forward pass __UpperCAmelCase : str = model(**UpperCamelCase ) __UpperCAmelCase : Optional[int] = outputs.logits # verify the logits __UpperCAmelCase : List[Any] = (1, 21_841) self.assertEqual(logits.shape , UpperCamelCase ) __UpperCAmelCase : Tuple = np.array([1.6881, -0.2787, 0.5901] ) self.assertTrue(np.allclose(logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) __UpperCAmelCase : str = 2_396 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase )
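The JIT test above is a reusable pattern: run the same call once under jax.jit and once with jax.disable_jit(), then compare the outputs' shapes. A minimal self-contained sketch of that pattern, with a toy function standing in for the Flax model call:

# Minimal sketch of the jit-vs-eager equivalence check used above
# (hypothetical toy function; the real tests wrap a Flax model call).
import jax
import jax.numpy as jnp


def forward(x):
    # stand-in for model(pixel_values=x)
    return jnp.tanh(x).sum(axis=-1), x.mean()


@jax.jit
def forward_jitted(x):
    return forward(x)


x = jnp.ones((1, 3, 4))
jitted = forward_jitted(x)
with jax.disable_jit():
    eager = forward(x)
assert len(jitted) == len(eager)
for a, b in zip(jitted, eager):
    assert a.shape == b.shape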
320
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
1
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=UpperCamelCase , ) assert hasattr(self , """env""" ) def lowerCamelCase__ ( self : int , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : int = { """enabled""": True, """processes_per_host""": 8, } __UpperCAmelCase : Union[str, Any] = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } __UpperCAmelCase : Optional[int] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} __UpperCAmelCase : Optional[Any] = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase , py_version="""py36""" , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Dict ): '''simple docstring''' TrainingJobAnalytics(UpperCamelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : str = self.create_estimator(UpperCamelCase ) # run training estimator.fit() # result dataframe __UpperCAmelCase : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __UpperCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) __UpperCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes 
starting, preprocessing, stopping __UpperCAmelCase : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , UpperCamelCase )
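The assertions at the end of the test gate the run on per-job thresholds from self.results. A stripped-down sketch of that KPI check with made-up numbers:

# Sketch of the KPI gate at the end of the SageMaker test: the runtime and
# every logged eval metric must clear a per-job threshold (values invented).
results = {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}
train_runtime = 1421.7                # e.g. TrainingTimeInSeconds
eval_accuracy = [0.31, 0.34, 0.36]    # one value per logged eval step
eval_loss = [1.18, 1.07, 0.98]

assert train_runtime <= results["train_runtime"]
assert all(acc >= results["eval_accuracy"] for acc in eval_accuracy)
assert all(loss <= results["eval_loss"] for loss in eval_loss)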
320
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
1
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[str] ) -> Any: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = tmp_path / """cache""" __UpperCAmelCase : Optional[Any] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase : List[Any] = TextDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read() _check_text_dataset(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[Any] = tmp_path / """cache""" __UpperCAmelCase : Tuple = {"""text""": """string"""} __UpperCAmelCase : List[Any] = features.copy() if features else default_expected_features __UpperCAmelCase : Dict = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase : str = TextDatasetReader(_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_text_dataset(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = tmp_path / """cache""" __UpperCAmelCase : Any = {"""text""": """string"""} __UpperCAmelCase : str = TextDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , split=_UpperCamelCase ).read() _check_text_dataset(_UpperCamelCase , _UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ) -> Optional[int]: '''simple docstring''' if issubclass(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = text_path elif issubclass(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : int = [text_path] __UpperCAmelCase : List[Any] = tmp_path / """cache""" __UpperCAmelCase : Dict = {"""text""": """string"""} __UpperCAmelCase : int = TextDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_text_dataset(_UpperCamelCase , _UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : str=("train",) ) -> Dict: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) for split in splits: __UpperCAmelCase : Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 
assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) -> Any: '''simple docstring''' __UpperCAmelCase : int = tmp_path / """cache""" __UpperCAmelCase : List[Any] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase : List[str] = TextDatasetReader({"""train""": text_path} , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read() _check_text_datasetdict(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str ) -> Any: '''simple docstring''' __UpperCAmelCase : Tuple = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __UpperCAmelCase : Union[str, Any] = {"""text""": """string"""} __UpperCAmelCase : str = features.copy() if features else default_expected_features __UpperCAmelCase : List[str] = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase : Optional[int] = TextDatasetReader({"""train""": text_path} , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_text_datasetdict(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict ) -> List[Any]: '''simple docstring''' if split: __UpperCAmelCase : List[str] = {split: text_path} else: __UpperCAmelCase : Optional[Any] = """train""" __UpperCAmelCase : Any = {"""train""": text_path, """test""": text_path} __UpperCAmelCase : List[Any] = tmp_path / """cache""" __UpperCAmelCase : List[Any] = {"""text""": """string"""} __UpperCAmelCase : Any = TextDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_text_datasetdict(_UpperCamelCase , _UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
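These reader tests lean on two pytest features: the tmp_path fixture for an isolated cache directory and parametrize to sweep feature dtypes. A minimal sketch of the same shape, with a hypothetical stand-in for TextDatasetReader:

# Sketch of the pytest pattern used above: parametrize over expected
# feature dtypes and build a per-test cache dir from tmp_path.
import pytest


def fake_reader(path, cache_dir, features=None):
    # stand-in for TextDatasetReader(...).read()
    return {"path": path, "cache_dir": cache_dir, "features": features or {"text": "string"}}


@pytest.mark.parametrize("features", [None, {"text": "string"}, {"text": "int32"}])
def test_reader_features(features, tmp_path):
    cache_dir = tmp_path / "cache"
    dataset = fake_reader("data.txt", cache_dir=cache_dir, features=features)
    expected = features if features is not None else {"text": "string"}
    assert dataset["features"] == expected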
320
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
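The core job of collate_fn above is padding variable-length token sequences into dense (tokens, mask) tensors. A cut-down version with the image fields dropped, assuming only torch:

# Padding variable-length sentences into (tokens, mask) tensors,
# the text half of the collate_fn above.
import torch


def collate_text(batch):
    lengths = [len(row["sentence"]) for row in batch]
    bsz, max_len = len(batch), max(lengths)
    text = torch.zeros(bsz, max_len, dtype=torch.long)
    mask = torch.zeros(bsz, max_len, dtype=torch.long)
    for i, (row, length) in enumerate(zip(batch, lengths)):
        text[i, :length] = row["sentence"]
        mask[i, :length] = 1
    return text, mask


batch = [{"sentence": torch.tensor([5, 7, 9])}, {"sentence": torch.tensor([3])}]
tokens, mask = collate_text(batch)
print(tokens)  # [[5, 7, 9], [3, 0, 0]]
print(mask)    # [[1, 1, 1], [1, 0, 0]]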
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int: '''simple docstring''' try: __UpperCAmelCase : str = int(_UpperCamelCase ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) __UpperCAmelCase : Any = 2 __UpperCAmelCase : List[Any] = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 __UpperCAmelCase : str = i while n % i == 0: __UpperCAmelCase : Any = n // i i += 1 return int(_UpperCamelCase ) if __name__ == "__main__": print(F"{solution() = }")
320
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Dict ) -> List[str]: '''simple docstring''' if index == r: for j in range(_UpperCamelCase ): print(data[j] , end=""" """ ) print(""" """ ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location __UpperCAmelCase : Any = arr[i] combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[str] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : str = [0] * r # Print all combination using temporary array 'data[]' combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , 0 , _UpperCamelCase , 0 ) if __name__ == "__main__": # Driver code to check the function above UpperCAmelCase : Dict = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : str = { 'configuration_trajectory_transformer': [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrajectoryTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
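preprocess chains resize, center crop, rescale, and normalize. The numeric tail of that chain in plain numpy, using the published OpenAI CLIP mean/std constants (the PIL-backed resize and crop steps are skipped here):

# The rescale + normalize tail of the pipeline above, in plain numpy.
import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])


def rescale_and_normalize(image: np.ndarray) -> np.ndarray:
    # image: (H, W, 3) uint8 -> float in [0, 1], then standardized per channel
    pixels = image.astype(np.float32) * (1 / 255)
    pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD
    # channels-last -> channels-first, matching ChannelDimension.FIRST
    return pixels.transpose(2, 0, 1)


image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
print(rescale_and_normalize(image).shape)  # (3, 224, 224)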
320
1
"""simple docstring""" import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Dict = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : int = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Dict = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys()) UpperCAmelCase : List[str] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase__ ( pl.LightningModule ): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : argparse.Namespace , UpperCamelCase : Dict=None , UpperCamelCase : Tuple="base" , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : List[str] , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : List[Any] = Path(self.hparams.output_dir ) __UpperCAmelCase : Any = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase , **UpperCamelCase , ) else: __UpperCAmelCase : PretrainedConfig = config __UpperCAmelCase : Optional[int] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(self.hparams , UpperCamelCase , UpperCamelCase ): assert hasattr(self.config , UpperCamelCase ), f'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , UpperCamelCase , getattr(self.hparams , UpperCamelCase ) ) if tokenizer is None: __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase , ) else: 
__UpperCAmelCase : PreTrainedTokenizer = tokenizer __UpperCAmelCase : List[Any] = MODEL_MODES[mode] if model is None: __UpperCAmelCase : str = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase , ) else: __UpperCAmelCase : Optional[Any] = model def lowerCamelCase__ ( self : int , *UpperCamelCase : Optional[int] , **UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_type.from_pretrained(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __UpperCAmelCase : Union[str, Any] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __UpperCAmelCase : Optional[int] = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1} return scheduler def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : str = self.model __UpperCAmelCase : Dict = ["""bias""", """LayerNorm.weight"""] __UpperCAmelCase : List[Any] = [ { """params""": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters """weight_decay""": self.hparams.weight_decay, }, { """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] if self.hparams.adafactor: __UpperCAmelCase : Tuple = Adafactor( UpperCamelCase , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase , relative_step=UpperCamelCase ) else: __UpperCAmelCase : Dict = AdamW( UpperCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __UpperCAmelCase : Tuple = optimizer __UpperCAmelCase : List[str] = self.get_lr_scheduler() return [optimizer], [scheduler] def lowerCamelCase__ ( self : str , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' return self.validation_step(UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List[str] ): '''simple docstring''' return self.validation_end(UpperCamelCase ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __UpperCAmelCase : List[str] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str ): '''simple docstring''' if stage == "test": __UpperCAmelCase : List[Any] = len(self.test_dataloader().dataset ) else: __UpperCAmelCase : Optional[Any] = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=UpperCamelCase ) __UpperCAmelCase : int = len(self.train_dataloader().dataset ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : bool = False ): '''simple docstring''' raise NotImplementedError("""You must implement this for your task""" ) def lowerCamelCase__ ( self : str ): '''simple docstring''' return self.train_loader def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.get_dataloader("""test""" , 
self.hparams.eval_batch_size , shuffle=UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Tuple ): '''simple docstring''' return os.path.join( self.hparams.data_dir , """cached_{}_{}_{}""".format( UpperCamelCase , list(filter(UpperCamelCase , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Dict[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.output_dir.joinpath("""best_tfmr""" ) __UpperCAmelCase : Optional[int] = self.step_count self.model.save_pretrained(UpperCamelCase ) self.tokenizer.save_pretrained(UpperCamelCase ) @staticmethod def lowerCamelCase__ ( UpperCamelCase : Any , UpperCamelCase : Tuple ): '''simple docstring''' parser.add_argument( """--model_name_or_path""" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--config_name""" , default="""""" , type=UpperCamelCase , help="""Pretrained config name or path if not the same as model_name""" ) parser.add_argument( """--tokenizer_name""" , default=UpperCamelCase , type=UpperCamelCase , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument( """--cache_dir""" , default=str(Path(UpperCamelCase ).parent / """test_run""" / """cache""" ) , type=UpperCamelCase , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , ) parser.add_argument( """--encoder_layerdrop""" , type=UpperCamelCase , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--decoder_layerdrop""" , type=UpperCamelCase , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--dropout""" , type=UpperCamelCase , help="""Dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--attention_dropout""" , type=UpperCamelCase , help="""Attention dropout probability (Optional). 
Goes into model.config""" , ) parser.add_argument("""--learning_rate""" , default=5e-5 , type=UpperCamelCase , help="""The initial learning rate for Adam.""" ) parser.add_argument( """--lr_scheduler""" , default="""linear""" , choices=UpperCamelCase , metavar=UpperCamelCase , type=UpperCamelCase , help="""Learning rate scheduler""" , ) parser.add_argument("""--weight_decay""" , default=0.0 , type=UpperCamelCase , help="""Weight decay if we apply some.""" ) parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=UpperCamelCase , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--warmup_steps""" , default=0 , type=UpperCamelCase , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--num_workers""" , default=4 , type=UpperCamelCase , help="""kwarg passed to DataLoader""" ) parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=UpperCamelCase ) parser.add_argument("""--train_batch_size""" , default=32 , type=UpperCamelCase ) parser.add_argument("""--eval_batch_size""" , default=32 , type=UpperCamelCase ) parser.add_argument("""--adafactor""" , action="""store_true""" ) class lowerCamelCase__ ( pl.Callback ): """simple docstring""" def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class lowerCamelCase__ ( pl.Callback ): """simple docstring""" def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(UpperCamelCase ) class lowerCamelCase__ ( pl.Callback ): """simple docstring""" def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = trainer.lr_schedulers[0]["""scheduler"""] __UpperCAmelCase : Optional[Any] = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : pl.Trainer , UpperCamelCase : pl.LightningModule ): '''simple docstring''' rank_zero_info("""***** Validation results *****""" ) __UpperCAmelCase : Optional[int] = trainer.callback_metrics # Log results for key in sorted(UpperCamelCase ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(UpperCamelCase , str(metrics[key] ) ) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : pl.Trainer , UpperCamelCase : pl.LightningModule ): '''simple docstring''' rank_zero_info("""***** Test results *****""" ) __UpperCAmelCase : Optional[int] = trainer.callback_metrics # Log and save results to file __UpperCAmelCase : List[str] = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" ) with open(UpperCamelCase , """w""" ) as writer: for key in sorted(UpperCamelCase ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(UpperCamelCase , str(metrics[key] ) ) ) writer.write("""{} = {}\n""".format(UpperCamelCase , str(metrics[key] ) ) ) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> None: '''simple docstring''' parser.add_argument( """--output_dir""" , 
default=str(Path(_UpperCamelCase ).parent / """test_run""" / """model_checkpoints""" ) , type=_UpperCamelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=_UpperCamelCase , default="""O2""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=_UpperCamelCase ) parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=_UpperCamelCase , help="""Max gradient norm""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" ) parser.add_argument( """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=_UpperCamelCase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--seed""" , type=_UpperCamelCase , default=4_2 , help="""random seed for initialization""" ) parser.add_argument( """--data_dir""" , default=str(Path(_UpperCamelCase ).parent / """test_run""" / """dummy-train-data""" ) , type=_UpperCamelCase , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , ) def lowerCamelCase ( _UpperCamelCase : BaseTransformer , _UpperCamelCase : argparse.Namespace , _UpperCamelCase : Any=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[Any]=[] , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Tuple=None , **_UpperCamelCase : List[Any] , ) -> Any: '''simple docstring''' pl.seed_everything(args.seed ) # init model __UpperCAmelCase : Optional[int] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_UpperCamelCase ) # add custom checkpoints if checkpoint_callback is None: __UpperCAmelCase : Optional[int] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_UpperCamelCase ) if logging_callback is None: __UpperCAmelCase : str = LoggingCallback() __UpperCAmelCase : Dict = {} if args.fpaa: __UpperCAmelCase : Union[str, Any] = 1_6 if args.gpus > 1: __UpperCAmelCase : int = """auto""" __UpperCAmelCase : List[str] = """ddp""" __UpperCAmelCase : int = args.accumulate_grad_batches __UpperCAmelCase : Dict = None __UpperCAmelCase : List[Any] = """auto""" __UpperCAmelCase : Union[str, Any] = pl.Trainer.from_argparse_args( _UpperCamelCase , weights_summary=_UpperCamelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_UpperCamelCase , val_check_interval=1 , num_sanity_val_steps=2 , **_UpperCamelCase , ) if args.do_train: trainer.fit(_UpperCamelCase ) else: print("""RAG modeling tests with new set functions successfuly executed!""" ) return trainer
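configure_optimizers above excludes biases and LayerNorm weights from weight decay. The grouping logic in isolation, with a tiny stand-in model whose parameter names mirror Hugging Face models:

# Building AdamW parameter groups that exempt biases and LayerNorm
# weights from weight decay, as configure_optimizers does above.
import torch
from torch import nn


class TinyModel(nn.Module):
    # attribute named LayerNorm so parameter names match the no_decay list
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)


model = TinyModel()
no_decay = ["bias", "LayerNorm.weight"]
grouped = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped, lr=5e-5)
print([len(g["params"]) for g in grouped])  # [1, 3]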
320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
1
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) UpperCAmelCase : Tuple = { 'sample_size': 32, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 1000, 'block_out_channels': [32, 64], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } UpperCAmelCase : Union[str, Any] = { 'sample_size': 64, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 1000, 'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } UpperCAmelCase : List[Any] = { 'sample_size': 256, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } UpperCAmelCase : List[str] = { 'num_train_timesteps': 40, 'sigma_min': 0.002, 'sigma_max': 80.0, } UpperCAmelCase : List[str] = { 'num_train_timesteps': 201, 'sigma_min': 0.002, 'sigma_max': 80.0, } UpperCAmelCase : List[str] = { 'num_train_timesteps': 151, 'sigma_min': 0.002, 'sigma_max': 80.0, } def lowerCamelCase ( _UpperCamelCase : int ) -> str: '''simple docstring''' if isinstance(_UpperCamelCase , _UpperCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("""boolean value expected""" ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=False ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = checkpoint[f'''{old_prefix}.in_layers.0.weight'''] __UpperCAmelCase : Dict = checkpoint[f'''{old_prefix}.in_layers.0.bias'''] __UpperCAmelCase : List[Any] = checkpoint[f'''{old_prefix}.in_layers.2.weight'''] __UpperCAmelCase : List[str] = checkpoint[f'''{old_prefix}.in_layers.2.bias'''] __UpperCAmelCase : int = checkpoint[f'''{old_prefix}.emb_layers.1.weight'''] __UpperCAmelCase : Optional[int] = checkpoint[f'''{old_prefix}.emb_layers.1.bias'''] __UpperCAmelCase : str = checkpoint[f'''{old_prefix}.out_layers.0.weight'''] __UpperCAmelCase : Optional[Any] = checkpoint[f'''{old_prefix}.out_layers.0.bias'''] __UpperCAmelCase : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.weight'''] __UpperCAmelCase : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.bias'''] if has_skip: __UpperCAmelCase : Union[str, Any] = checkpoint[f'''{old_prefix}.skip_connection.weight'''] __UpperCAmelCase : str = 
checkpoint[f'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict=None ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : int = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) __UpperCAmelCase : List[str] = checkpoint[f'''{old_prefix}.norm.weight'''] __UpperCAmelCase : Any = checkpoint[f'''{old_prefix}.norm.bias'''] __UpperCAmelCase : Tuple = weight_q.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Any = bias_k.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Union[str, Any] = weight_v.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Any = bias_v.squeeze(-1 ).squeeze(-1 ) __UpperCAmelCase : Optional[int] = ( checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) __UpperCAmelCase : List[str] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Dict ) -> int: '''simple docstring''' __UpperCAmelCase : str = torch.load(_UpperCamelCase , map_location="""cpu""" ) __UpperCAmelCase : Dict = {} __UpperCAmelCase : Dict = checkpoint["""time_embed.0.weight"""] __UpperCAmelCase : int = checkpoint["""time_embed.0.bias"""] __UpperCAmelCase : List[str] = checkpoint["""time_embed.2.weight"""] __UpperCAmelCase : int = checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: __UpperCAmelCase : List[Any] = checkpoint["""label_emb.weight"""] __UpperCAmelCase : Optional[Any] = checkpoint["""input_blocks.0.0.weight"""] __UpperCAmelCase : Union[str, Any] = checkpoint["""input_blocks.0.0.bias"""] __UpperCAmelCase : Optional[Any] = unet_config["""down_block_types"""] __UpperCAmelCase : Optional[int] = unet_config["""layers_per_block"""] __UpperCAmelCase : int = unet_config["""attention_head_dim"""] __UpperCAmelCase : Dict = unet_config["""block_out_channels"""] __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Optional[int] = channels_list[0] for i, layer_type in enumerate(_UpperCamelCase ): __UpperCAmelCase : int = channels_list[i] __UpperCAmelCase : Union[str, Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(_UpperCamelCase ): __UpperCAmelCase : str = f'''down_blocks.{i}.resnets.{j}''' __UpperCAmelCase : Dict = f'''input_blocks.{current_layer}.0''' __UpperCAmelCase : Any = True if j == 0 and downsample_block_has_skip else False __UpperCAmelCase : List[Any] = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(_UpperCamelCase ): __UpperCAmelCase : Optional[int] = f'''down_blocks.{i}.resnets.{j}''' __UpperCAmelCase : Tuple = f'''input_blocks.{current_layer}.0''' __UpperCAmelCase : Optional[int] = True if j == 0 and downsample_block_has_skip else False __UpperCAmelCase : List[str] = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase ) __UpperCAmelCase : Dict = f'''down_blocks.{i}.attentions.{j}''' __UpperCAmelCase : Dict = 
f'''input_blocks.{current_layer}.1''' __UpperCAmelCase : Optional[int] = convert_attention( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) current_layer += 1 if i != len(_UpperCamelCase ) - 1: __UpperCAmelCase : Any = f'''down_blocks.{i}.downsamplers.0''' __UpperCAmelCase : Tuple = f'''input_blocks.{current_layer}.0''' __UpperCAmelCase : List[str] = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) current_layer += 1 __UpperCAmelCase : Union[str, Any] = current_channels # hardcoded the mid-block for now __UpperCAmelCase : Union[str, Any] = """mid_block.resnets.0""" __UpperCAmelCase : Any = """middle_block.0""" __UpperCAmelCase : Dict = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Tuple = """mid_block.attentions.0""" __UpperCAmelCase : Any = """middle_block.1""" __UpperCAmelCase : int = convert_attention(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Optional[int] = """mid_block.resnets.1""" __UpperCAmelCase : Union[str, Any] = """middle_block.2""" __UpperCAmelCase : str = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Optional[int] = unet_config["""up_block_types"""] for i, layer_type in enumerate(_UpperCamelCase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __UpperCAmelCase : Dict = f'''up_blocks.{i}.resnets.{j}''' __UpperCAmelCase : Tuple = f'''output_blocks.{current_layer}.0''' __UpperCAmelCase : Dict = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase ) current_layer += 1 if i != len(_UpperCamelCase ) - 1: __UpperCAmelCase : int = f'''up_blocks.{i}.upsamplers.0''' __UpperCAmelCase : int = f'''output_blocks.{current_layer-1}.1''' __UpperCAmelCase : Union[str, Any] = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __UpperCAmelCase : Optional[Any] = f'''up_blocks.{i}.resnets.{j}''' __UpperCAmelCase : List[Any] = f'''output_blocks.{current_layer}.0''' __UpperCAmelCase : Tuple = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase ) __UpperCAmelCase : List[Any] = f'''up_blocks.{i}.attentions.{j}''' __UpperCAmelCase : Union[str, Any] = f'''output_blocks.{current_layer}.1''' __UpperCAmelCase : List[str] = convert_attention( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) current_layer += 1 if i != len(_UpperCamelCase ) - 1: __UpperCAmelCase : Optional[int] = f'''up_blocks.{i}.upsamplers.0''' __UpperCAmelCase : Dict = f'''output_blocks.{current_layer-1}.2''' __UpperCAmelCase : Optional[Any] = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : int = checkpoint["""out.0.weight"""] __UpperCAmelCase : List[Any] = checkpoint["""out.0.bias"""] __UpperCAmelCase : Optional[int] = checkpoint["""out.2.weight"""] __UpperCAmelCase : int = checkpoint["""out.2.bias"""] return new_checkpoint if __name__ == "__main__": UpperCAmelCase : int = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path 
to output the converted UNet model.' ) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') UpperCAmelCase : List[str] = parser.parse_args() UpperCAmelCase : int = strabool(args.class_cond) UpperCAmelCase : int = os.path.basename(args.unet_path) print(F"Checkpoint: {ckpt_name}") # Get U-Net config if "imagenet64" in ckpt_name: UpperCAmelCase : Dict = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): UpperCAmelCase : str = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: UpperCAmelCase : List[str] = TEST_UNET_CONFIG else: raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.") if not args.class_cond: UpperCAmelCase : List[Any] = None UpperCAmelCase : Any = con_pt_to_diffuser(args.unet_path, unet_config) UpperCAmelCase : Any = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: UpperCAmelCase : List[str] = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: UpperCAmelCase : List[Any] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): UpperCAmelCase : Dict = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.") UpperCAmelCase : str = CMStochasticIterativeScheduler(**scheduler_config) UpperCAmelCase : List[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
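One detail of the attention conversion above is easy to miss: the original checkpoint stores the fused qkv projection as a 1x1 convolution. The snippet below is a standalone illustration (with made-up sizes) of how chunk and the double squeeze turn it into three linear weights; it only assumes PyTorch is installed.

import torch

channels = 4
# Stand-in for a "<prefix>.qkv.weight" tensor of shape (3*C, C, 1, 1).
fused_qkv = torch.randn(3 * channels, channels, 1, 1)
weight_q, weight_k, weight_v = fused_qkv.chunk(3, dim=0)
weight_q = weight_q.squeeze(-1).squeeze(-1)  # drop the two trailing 1x1 conv dims
assert weight_q.shape == (channels, channels)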
320
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
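The input-preparation helper above builds default masks when none are passed; here is a tiny standalone illustration of that rule (the pad id and token ids are made up), assuming only NumPy.

import numpy as np

pad_token_id = 0  # hypothetical pad id for illustration
input_ids = np.array([[5, 7, 9, pad_token_id]])
# attention_mask is simply "token != pad", cast to int8.
attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
assert attention_mask.tolist() == [[1, 1, 1, 0]]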
320
1
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
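The global-attention test above encodes an expectation worth spelling out: per-sequence masks of zeros are right-padded to the longest sequence, with -1 as the pad value. A pure-Python sketch of that behaviour (not the library's implementation):

def pad_global_attention(masks: list[list[int]], pad_value: int = -1) -> list[list[int]]:
    # Right-pad every mask to the length of the longest one.
    longest = max(len(mask) for mask in masks)
    return [mask + [pad_value] * (longest - len(mask)) for mask in masks]

assert pad_global_attention([[0] * 7, [0] * 5]) == [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, -1, -1],
]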
320
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
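The permutes in the weight-copying helper above encode the layout difference between the two frameworks: TensorFlow stores convolution kernels as HWIO while PyTorch expects OIHW. A standalone check with dummy shapes (assumes NumPy and PyTorch):

import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 8, 16), dtype=np.float32)  # HWIO: height, width, in, out
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> OIHW
assert tuple(pt_kernel.shape) == (16, 8, 3, 3)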
320
1
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = { 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json', # See all BART models at https://huggingface.co/models?filter=bart } class lowerCamelCase__ ( A ): """simple docstring""" __a = """bart""" __a = ["""past_key_values"""] __a = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : int , UpperCamelCase : List[str]=50_265 , UpperCamelCase : Any=1_024 , UpperCamelCase : List[str]=12 , UpperCamelCase : List[Any]=4_096 , UpperCamelCase : str=16 , UpperCamelCase : Any=12 , UpperCamelCase : Optional[Any]=4_096 , UpperCamelCase : Tuple=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : int=0.0 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=1_024 , UpperCamelCase : str=0.1 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : int=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Optional[Any]=0.0 , UpperCamelCase : Tuple=False , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=3 , UpperCamelCase : int=1 , UpperCamelCase : str=0 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : str=True , UpperCamelCase : Any=2 , UpperCamelCase : Tuple=2 , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = vocab_size __UpperCAmelCase : Dict = max_position_embeddings __UpperCAmelCase : int = d_model __UpperCAmelCase : str = encoder_ffn_dim __UpperCAmelCase : Tuple = encoder_layers __UpperCAmelCase : List[str] = encoder_attention_heads __UpperCAmelCase : Optional[Any] = decoder_ffn_dim __UpperCAmelCase : List[Any] = decoder_layers __UpperCAmelCase : List[str] = decoder_attention_heads __UpperCAmelCase : Optional[int] = dropout __UpperCAmelCase : List[str] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Optional[int] = activation_function __UpperCAmelCase : Union[str, Any] = init_std __UpperCAmelCase : str = encoder_layerdrop __UpperCAmelCase : Optional[int] = decoder_layerdrop __UpperCAmelCase : Dict = classifier_dropout __UpperCAmelCase : Any = use_cache __UpperCAmelCase : Any = encoder_layers __UpperCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCamelCase ): __UpperCAmelCase : Tuple = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
''' """The config can simply be saved and uploaded again to be fixed.""" ) class lowerCamelCase__ ( A ): """simple docstring""" @property def lowerCamelCase__ ( self : int ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : str = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __UpperCAmelCase : Tuple = {0: """batch"""} __UpperCAmelCase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: __UpperCAmelCase : List[Any] = {0: """batch""", 1: """decoder_sequence"""} __UpperCAmelCase : Optional[int] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. __UpperCAmelCase : Optional[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __UpperCAmelCase ,__UpperCAmelCase : Any = self.num_layers for i in range(UpperCamelCase ): __UpperCAmelCase : Any = {0: """batch""", 2: """past_sequence + sequence"""} __UpperCAmelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""} else: __UpperCAmelCase : List[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : List[Any] = super().outputs else: __UpperCAmelCase : Union[str, Any] = super(UpperCamelCase , self ).outputs if self.use_past: __UpperCAmelCase ,__UpperCAmelCase : Any = self.num_layers for i in range(UpperCamelCase ): __UpperCAmelCase : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""} __UpperCAmelCase : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Generate decoder inputs __UpperCAmelCase : int = seq_length if not self.use_past else 1 __UpperCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} __UpperCAmelCase : List[str] = dict(**UpperCamelCase , **UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = common_inputs["""input_ids"""].shape __UpperCAmelCase : Any = common_inputs["""decoder_input_ids"""].shape[1] __UpperCAmelCase ,__UpperCAmelCase : List[Any] = 
self.num_attention_heads __UpperCAmelCase : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCAmelCase : List[Any] = decoder_seq_length + 3 __UpperCAmelCase : Optional[int] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __UpperCAmelCase : Any = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(UpperCamelCase , UpperCamelCase )] , dim=1 ) __UpperCAmelCase : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.num_layers __UpperCAmelCase : Optional[Any] = min(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = max(UpperCamelCase , UpperCamelCase ) - min_num_layers __UpperCAmelCase : Dict = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase ), ) ) # TODO: test this. __UpperCAmelCase : List[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(UpperCamelCase , UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) ) return common_inputs def lowerCamelCase__ ( self : Any , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' __UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCAmelCase ,__UpperCAmelCase : Any = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCAmelCase : Any = seqlen + 2 __UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.num_layers __UpperCAmelCase ,__UpperCAmelCase : int = self.num_attention_heads __UpperCAmelCase : List[str] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCAmelCase : int = common_inputs["""attention_mask"""].dtype __UpperCAmelCase : List[Any] = torch.cat( [common_inputs["""attention_mask"""], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 ) __UpperCAmelCase : List[Any] = [ (torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(UpperCamelCase ) ] return common_inputs def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' __UpperCAmelCase : Dict = compute_effective_axis_dimension( UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __UpperCAmelCase : Any = tokenizer.num_special_tokens_to_add(UpperCamelCase ) __UpperCAmelCase : int = compute_effective_axis_dimension( UpperCamelCase , 
fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence __UpperCAmelCase : Optional[int] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size __UpperCAmelCase : List[str] = dict(tokenizer(UpperCamelCase , return_tensors=UpperCamelCase ) ) return common_inputs def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) elif self.task == "causal-lm": __UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_causal_lm( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) else: __UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) return common_inputs def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : Union[str, Any] = super()._flatten_past_key_values_(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: __UpperCAmelCase : str = super(UpperCamelCase , self )._flatten_past_key_values_( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
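The dummy cache construction above reduces to one shape rule: each layer contributes a (key, value) pair of tensors shaped (batch, num_heads, past_length, hidden_size // num_heads). A minimal standalone sketch with illustrative sizes:

import torch

batch, num_heads, past_length, hidden_size, num_layers = 2, 16, 8, 1024, 12
shape = (batch, num_heads, past_length, hidden_size // num_heads)
# One (key, value) pair of zero tensors per layer.
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == shape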
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor UpperCAmelCase : List[Any] = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[Any] , *UpperCamelCase : Dict , **UpperCamelCase : int ): '''simple docstring''' warnings.warn( """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use DeformableDetrImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( _UpperCamelCase : str ) -> Optional[int]: '''simple docstring''' if isinstance(_UpperCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCamelCase__ : """simple docstring""" def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : int ): '''simple docstring''' pass def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase__ ( self : str , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : float ): '''simple docstring''' __UpperCAmelCase : Any = np.abs((a - b) ).max() self.assertLessEqual(UpperCamelCase , UpperCamelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=None , **UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : int = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel(UpperCamelCase ) __UpperCAmelCase : str = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Any=None , **UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __UpperCAmelCase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase ) __UpperCAmelCase : int = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCamelCase__ ( 
self : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : List[str] = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = {"""vision_model""": vision_model, """text_model""": text_model} __UpperCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase ) __UpperCAmelCase : Optional[int] = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) __UpperCAmelCase : Optional[int] = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase ) __UpperCAmelCase : List[Any] = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) __UpperCAmelCase : Any = after_output[0] __UpperCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase , 1e-3 ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=None , **UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : int = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __UpperCAmelCase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase ) __UpperCAmelCase : List[str] = model( input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase , output_attentions=UpperCamelCase ) __UpperCAmelCase : Any = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCAmelCase : Dict = to_atuple(vision_model.config.image_size ) __UpperCAmelCase : Any = to_atuple(vision_model.config.patch_size ) __UpperCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __UpperCAmelCase : List[str] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __UpperCAmelCase : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Dict ): '''simple docstring''' pt_model.to(UpperCamelCase ) pt_model.eval() # prepare inputs __UpperCAmelCase : List[Any] = inputs_dict __UpperCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __UpperCAmelCase : str = pt_model(**UpperCamelCase ).to_tuple() __UpperCAmelCase : List[Any] = fx_model(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase , pt_output.numpy() , 
4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase , from_pt=UpperCamelCase ) __UpperCAmelCase : Dict = fx_model_loaded(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase , from_flax=UpperCamelCase ) pt_model_loaded.to(UpperCamelCase ) pt_model_loaded.eval() with torch.no_grad(): __UpperCAmelCase : int = pt_model_loaded(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(UpperCamelCase , pt_output_loaded.numpy() , 4e-2 ) def lowerCamelCase__ ( self : int , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = VisionTextDualEncoderModel(UpperCamelCase ) __UpperCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(UpperCamelCase ) __UpperCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase ) __UpperCAmelCase : List[Any] = fx_state self.check_pt_flax_equivalence(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : int = VisionTextDualEncoderModel(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel(UpperCamelCase ) __UpperCAmelCase : Optional[int] = load_flax_weights_in_pytorch_model(UpperCamelCase , fx_model.params ) self.check_pt_flax_equivalence(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**UpperCamelCase ) @is_pt_flax_cross_test def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = config_inputs_dict.pop("""vision_config""" ) __UpperCAmelCase : Optional[Any] = config_inputs_dict.pop("""text_config""" ) __UpperCAmelCase : Dict = config_inputs_dict 
self.check_equivalence_pt_to_flax(UpperCamelCase , UpperCamelCase , UpperCamelCase ) self.check_equivalence_flax_to_pt(UpperCamelCase , UpperCamelCase , UpperCamelCase ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Tuple = self.get_pretrained_model_and_inputs() __UpperCAmelCase : str = model_a(**UpperCamelCase ) __UpperCAmelCase : int = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(UpperCamelCase ) __UpperCAmelCase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Dict = model_a(**UpperCamelCase ) __UpperCAmelCase : Any = after_outputs[0] __UpperCAmelCase : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase , 1e-5 ) @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=UpperCamelCase , text_from_pt=UpperCamelCase , ) __UpperCAmelCase : List[Any] = 13 __UpperCAmelCase : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __UpperCAmelCase : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __UpperCAmelCase : Tuple = random_attention_mask([batch_size, 4] ) __UpperCAmelCase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = FlaxViTModel(UpperCamelCase ) __UpperCAmelCase : Dict = FlaxBertModel(UpperCamelCase ) return vision_model, text_model def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = FlaxViTModelTester(self ) __UpperCAmelCase : Optional[Any] = FlaxBertModelTester(self ) __UpperCAmelCase : Optional[Any] = vit_model_tester.prepare_config_and_inputs() __UpperCAmelCase : Optional[int] = bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase ,__UpperCAmelCase : Dict = vision_config_and_inputs __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=UpperCamelCase , text_from_pt=UpperCamelCase , ) __UpperCAmelCase : str = 13 __UpperCAmelCase : Tuple = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __UpperCAmelCase : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __UpperCAmelCase : Dict = random_attention_mask([batch_size, 4] ) 
__UpperCAmelCase : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Dict , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxCLIPVisionModel(UpperCamelCase ) __UpperCAmelCase : List[str] = FlaxBertModel(UpperCamelCase ) return vision_model, text_model def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Any = FlaxCLIPVisionModelTester(self ) __UpperCAmelCase : Any = FlaxBertModelTester(self ) __UpperCAmelCase : str = clip_model_tester.prepare_config_and_inputs() __UpperCAmelCase : Optional[Any] = bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase ,__UpperCAmelCase : int = vision_config_and_inputs __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) __UpperCAmelCase : Tuple = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase : List[Any] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=UpperCamelCase , padding=UpperCamelCase , return_tensors="""np""" ) __UpperCAmelCase : List[Any] = model(**UpperCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __UpperCAmelCase : int = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , UpperCamelCase , atol=1e-3 ) )
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Dict = [ 'small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base', ] UpperCAmelCase : Optional[int] = { 'vocab_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json', 'funnel-transformer/small-base': ( 'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json' ), 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json', 'funnel-transformer/large-base': ( 'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json' ), 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json' ), }, } UpperCAmelCase : Optional[int] = {F"funnel-transformer/{name}": 512 for name in _model_names} UpperCAmelCase : Dict = {F"funnel-transformer/{name}": {'do_lower_case': True} for name in _model_names} class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_INIT_CONFIGURATION __a = FunnelTokenizer __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = 2 def __init__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Union[str, Any]="<unk>" , 
UpperCamelCase : Any="<sep>" , UpperCamelCase : Union[str, Any]="<pad>" , UpperCamelCase : str="<cls>" , UpperCamelCase : Optional[Any]="<mask>" , UpperCamelCase : int="<s>" , UpperCamelCase : List[Any]="</s>" , UpperCamelCase : List[str]=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Tuple=None , UpperCamelCase : Dict="##" , **UpperCamelCase : str , ): '''simple docstring''' super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , clean_text=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , wordpieces_prefix=UpperCamelCase , **UpperCamelCase , ) __UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , UpperCamelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , UpperCamelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , UpperCamelCase ) != tokenize_chinese_chars ): __UpperCAmelCase : List[str] = getattr(UpperCamelCase , normalizer_state.pop("""type""" ) ) __UpperCAmelCase : Dict = do_lower_case __UpperCAmelCase : int = strip_accents __UpperCAmelCase : Optional[int] = tokenize_chinese_chars __UpperCAmelCase : Optional[int] = normalizer_class(**UpperCamelCase ) __UpperCAmelCase : Optional[int] = do_lower_case def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : List[str]=None ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [self.sep_token_id] __UpperCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : str , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' __UpperCAmelCase : List[str] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase )
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
"""simple docstring""" import os from pathlib import Path def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' from torch.utils.cpp_extension import load __UpperCAmelCase : Any = Path(_UpperCamelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" __UpperCAmelCase : List[str] = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , _UpperCamelCase , with_cuda=_UpperCamelCase , extra_include_paths=[str(_UpperCamelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase : Optional[int] = datasets.logging.get_logger(__name__) UpperCAmelCase : Any = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' UpperCAmelCase : str = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' UpperCAmelCase : Dict = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str]=False , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : List[Any]=True , _UpperCamelCase : List[str]=False , _UpperCamelCase : Optional[int]="dummy_doc" ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = {doc: key_lines} __UpperCAmelCase : Union[str, Any] = {doc: sys_lines} __UpperCAmelCase : List[str] = {} __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : List[Any] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase ,__UpperCAmelCase : List[Any] = reader.get_doc_mentions(_UpperCamelCase , key_doc_lines[doc] , _UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: __UpperCAmelCase : List[str] = reader.set_annotated_parse_trees(_UpperCamelCase , key_doc_lines[doc] , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = reader.get_doc_mentions(_UpperCamelCase , sys_doc_lines[doc] , _UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: __UpperCAmelCase : Dict = reader.set_annotated_parse_trees(_UpperCamelCase , key_doc_lines[doc] , _UpperCamelCase , _UpperCamelCase ) if remove_nested: __UpperCAmelCase ,__UpperCAmelCase : Any = reader.remove_nested_coref_mentions(_UpperCamelCase , _UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters __UpperCAmelCase ,__UpperCAmelCase : Dict = reader.remove_nested_coref_mentions(_UpperCamelCase , _UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters __UpperCAmelCase : Union[str, Any] = reader.get_mention_assignments(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Optional[Any] = reader.get_mention_assignments(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' 
) logger.info( """Number of resulting singleton clusters in the key """ f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' """files, respectively""" ) return doc_coref_infos def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = get_coref_infos(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : str = 0 for name, metric in metrics: __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = evaluator.evaluate_documents(_UpperCamelCase , _UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} ) logger.info( name.ljust(1_0 ) , f'''Recall: {recall * 1_0_0:.2f}''' , f''' Precision: {precision * 1_0_0:.2f}''' , f''' F1: {fa * 1_0_0:.2f}''' , ) if conll_subparts_num == 3: __UpperCAmelCase : Tuple = (conll / 3) * 1_0_0 logger.info(f'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: __UpperCAmelCase : str = line.split()[5] if not parse_col == "-": __UpperCAmelCase : List[str] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def lowerCamelCase__ ( self : int , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[Any]=False , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False ): '''simple docstring''' __UpperCAmelCase : Tuple = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: __UpperCAmelCase : str = util.check_gold_parse_annotation(UpperCamelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" __UpperCAmelCase : int = evaluate( key_lines=UpperCamelCase , sys_lines=UpperCamelCase , metrics=UpperCamelCase , NP_only=UpperCamelCase 
, remove_nested=UpperCamelCase , keep_singletons=UpperCamelCase , min_span=UpperCamelCase , ) return score
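# Hedged sketch (not part of the metric implementation above): the averaged CoNLL
# score is simply the mean of the MUC, B-cubed and CEAFe F1 values scaled to 100,
# mirroring the `conll / 3 * 100` step in the evaluate function. The three F1
# values below are made-up placeholders for numbers the evaluator would return.
muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.75, 0.70
conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
print(f"CoNLL score: {conll_score:.2f}")  # 75.00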
320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase ( _UpperCamelCase : Any ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[str] = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: __UpperCAmelCase : Optional[int] = 1_0_2_4 __UpperCAmelCase : List[Any] = 4_0_9_6 __UpperCAmelCase : int = 2_4 __UpperCAmelCase : List[str] = 1_6 __UpperCAmelCase : Optional[Any] = [5, 1_1, 1_7, 2_3] __UpperCAmelCase : Optional[Any] = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4] __UpperCAmelCase : Union[str, Any] = (1, 3_8_4, 3_8_4) if "nyu" or "midas" in checkpoint_url: __UpperCAmelCase : List[str] = 7_6_8 __UpperCAmelCase : Dict = [1, 1, 1, 0.5] __UpperCAmelCase : Any = [2_5_6, 5_1_2, 7_6_8, 7_6_8] __UpperCAmelCase : List[str] = 1_5_0 __UpperCAmelCase : Union[str, Any] = 1_6 __UpperCAmelCase : Optional[Any] = (1, 3_8_4, 3_8_4) __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : Dict = """project""" if "ade" in checkpoint_url: __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Dict = 7_6_8 __UpperCAmelCase : Optional[Any] = [1, 1, 1, 0.5] __UpperCAmelCase : List[str] = 1_5_0 __UpperCAmelCase : Union[str, Any] = 1_6 __UpperCAmelCase : List[str] = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """ade20k-id2label.json""" __UpperCAmelCase : int = json.load(open(cached_download(hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) ) __UpperCAmelCase : Optional[int] = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : str = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} __UpperCAmelCase : List[str] = [1, 1_5_0, 4_8_0, 4_8_0] return config, expected_shape def lowerCamelCase ( _UpperCamelCase : int ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : int = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(_UpperCamelCase , _UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : str ) -> List[str]: '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): __UpperCAmelCase : List[str] = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: __UpperCAmelCase : str = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: __UpperCAmelCase : Optional[Any] = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: __UpperCAmelCase : Dict = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: __UpperCAmelCase : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: __UpperCAmelCase : Dict = name.replace("""proj""" , """projection""" ) if "blocks" in name: __UpperCAmelCase : Optional[int] = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: __UpperCAmelCase : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __UpperCAmelCase : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: __UpperCAmelCase : Dict = 
name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: __UpperCAmelCase : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: __UpperCAmelCase : Tuple = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: __UpperCAmelCase : Tuple = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: __UpperCAmelCase : str = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: __UpperCAmelCase : Dict = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: __UpperCAmelCase : Optional[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 __UpperCAmelCase : List[Any] = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: __UpperCAmelCase : Dict = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: __UpperCAmelCase : Tuple = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: __UpperCAmelCase : List[Any] = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: __UpperCAmelCase : Optional[int] = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: __UpperCAmelCase : Dict = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: __UpperCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: __UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: __UpperCAmelCase : int = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: __UpperCAmelCase : Any = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: __UpperCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: __UpperCAmelCase : Dict = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: __UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: __UpperCAmelCase : Tuple = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: __UpperCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: __UpperCAmelCase : int = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: 
__UpperCAmelCase : List[str] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: __UpperCAmelCase : str = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: __UpperCAmelCase : str = name.replace("""bn""" , """batch_norm""" ) if "head" in name: __UpperCAmelCase : int = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: __UpperCAmelCase : Optional[int] = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: __UpperCAmelCase : Tuple = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: __UpperCAmelCase : Dict = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." in name: __UpperCAmelCase : Optional[Any] = name.replace("""..""" , """.""" ) if "stem.conv" in name: __UpperCAmelCase : str = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: __UpperCAmelCase : Optional[int] = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: __UpperCAmelCase : Optional[Any] = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: __UpperCAmelCase : str = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: __UpperCAmelCase : List[Any] = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: __UpperCAmelCase : List[Any] = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __UpperCAmelCase : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) __UpperCAmelCase : str = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] __UpperCAmelCase : Tuple = in_proj_bias[: config.hidden_size] __UpperCAmelCase : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __UpperCAmelCase : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __UpperCAmelCase : List[Any] = in_proj_weight[ -config.hidden_size :, : ] __UpperCAmelCase : str = in_proj_bias[-config.hidden_size :] def lowerCamelCase ( ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[int] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> str: '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : List[Any] = get_dpt_config(_UpperCamelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") __UpperCAmelCase : Dict = torch.load(_UpperCamelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(_UpperCamelCase ) # rename keys for key in state_dict.copy().keys(): __UpperCAmelCase : 
Tuple = state_dict.pop(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = val # read in qkv matrices read_in_q_k_v(_UpperCamelCase , _UpperCamelCase ) # load HuggingFace model __UpperCAmelCase : Any = DPTForSemanticSegmentation(_UpperCamelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) model.eval() # Check outputs on an image __UpperCAmelCase : Optional[int] = 4_8_0 if """ade""" in checkpoint_url else 3_8_4 __UpperCAmelCase : Union[str, Any] = DPTImageProcessor(size=_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = prepare_img() __UpperCAmelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""pt""" ) # forward pass __UpperCAmelCase : int = model(**_UpperCamelCase ).logits if """ade""" in checkpoint_url else model(**_UpperCamelCase ).predicted_depth if show_prediction: __UpperCAmelCase : Optional[int] = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=_UpperCamelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show() if pytorch_dump_folder_path is not None: Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_UpperCamelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCAmelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
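# Hypothetical command line for the conversion script above (the file names are
# illustrative; the checkpoint must already exist locally because the script reads
# it with torch.load rather than downloading from the URL):
# python convert_dpt_hybrid_to_pytorch.py \
#     --checkpoint_url ./dpt_hybrid-midas.pt \
#     --pytorch_dump_folder_path ./dpt-hybrid-midas \
#     --show_prediction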
320
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCAmelCase : Optional[int] = 'pt' elif is_tf_available(): UpperCAmelCase : Tuple = 'tf' else: UpperCAmelCase : Tuple = 'jax' class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = PerceiverTokenizer __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' super().setUp() __UpperCAmelCase : List[str] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" ) def lowerCamelCase__ ( self : int , **UpperCamelCase : Optional[int] ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict=False , UpperCamelCase : str=20 , UpperCamelCase : str=5 ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [] for i in range(len(UpperCamelCase ) ): try: __UpperCAmelCase : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) __UpperCAmelCase : Optional[Any] = list(filter(lambda UpperCamelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase ) ) __UpperCAmelCase : List[str] = list(filter(lambda UpperCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase ) , UpperCamelCase ) ) if max_length is not None and len(UpperCamelCase ) > max_length: __UpperCAmelCase : int = toks[:max_length] if min_length is not None and len(UpperCamelCase ) < min_length and len(UpperCamelCase ) > 0: while len(UpperCamelCase ) < min_length: __UpperCAmelCase : Optional[Any] = toks + toks # toks_str = [t[1] for t in toks] __UpperCAmelCase : Union[str, Any] = [t[0] for t in toks] # Ensure consistency __UpperCAmelCase : Any = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) if " " not in output_txt and len(UpperCamelCase ) > 1: __UpperCAmelCase : Dict = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase ) ) if with_prefix_space: __UpperCAmelCase : str = """ """ + output_txt __UpperCAmelCase : int = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) return output_txt, output_ids def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = self.perceiver_tokenizer __UpperCAmelCase : int = """Unicode €.""" __UpperCAmelCase : int = tokenizer(UpperCamelCase ) __UpperCAmelCase : Tuple = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase ) # decoding __UpperCAmelCase : Optional[int] = tokenizer.decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , """[CLS]Unicode €.[SEP]""" ) __UpperCAmelCase : str = tokenizer("""e è é ê ë""" ) __UpperCAmelCase : int = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase ) # decoding __UpperCAmelCase : Tuple = 
tokenizer.decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , """[CLS]e è é ê ë[SEP]""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[str] = self.perceiver_tokenizer __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off __UpperCAmelCase : Tuple = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on __UpperCAmelCase : Union[str, Any] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) if FRAMEWORK != "jax": __UpperCAmelCase : Any = list(batch.input_ids.numpy()[0] ) else: __UpperCAmelCase : List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer __UpperCAmelCase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""decoder_input_ids""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.perceiver_tokenizer __UpperCAmelCase : int = [ """Summary of the text.""", """Another summary.""", ] __UpperCAmelCase : Any = tokenizer( text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase , return_tensors=UpperCamelCase ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __UpperCAmelCase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Dict = """ He is very happy, UNwant\u00E9d,running""" __UpperCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) tokenizer.save_pretrained(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = after_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) shutil.rmtree(UpperCamelCase ) __UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc 
__UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() __UpperCAmelCase : Tuple = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) __UpperCAmelCase : List[str] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __UpperCAmelCase : int = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) tokenizer.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = tokenizer.__class__.from_pretrained(UpperCamelCase ) __UpperCAmelCase : List[Any] = after_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __UpperCAmelCase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase ) with open(os.path.join(UpperCamelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __UpperCAmelCase : List[Any] = json.load(UpperCamelCase ) with open(os.path.join(UpperCamelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __UpperCAmelCase : List[Any] = json.load(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(125 )] __UpperCAmelCase : str = added_tokens_extra_ids + [ """an_additional_special_token""" ] __UpperCAmelCase : str = added_tokens_extra_ids + [ """an_additional_special_token""" ] with open(os.path.join(UpperCamelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase , UpperCamelCase ) with open(os.path.join(UpperCamelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase , UpperCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __UpperCAmelCase : List[str] = tokenizer_class.from_pretrained( UpperCamelCase , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __UpperCAmelCase : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase )] __UpperCAmelCase : Dict = tokenizer_class.from_pretrained( UpperCamelCase , additional_special_tokens=UpperCamelCase , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , """�""" ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Any ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : List[str] = self.get_tokenizers(fast=UpperCamelCase , do_lower_case=UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : Optional[int] = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""] __UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase )
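# Byte-level intuition behind the ids asserted in the tests above (grounded in the
# expected encodings shown there): the Perceiver tokenizer maps every UTF-8 byte to
# byte value + 6, reserving ids 0-5 for special tokens such as [CLS]=4 and [SEP]=5.
# For example ord("U") == 85, so "U" encodes to 91, and the euro sign's three UTF-8
# bytes (226, 130, 172) become (232, 136, 178), exactly the ids in the first test.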
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import os def lowerCamelCase ( _UpperCamelCase : str = "input.txt" ) -> int: '''simple docstring''' with open(os.path.join(os.path.dirname(_UpperCamelCase ) , _UpperCamelCase ) ) as input_file: __UpperCAmelCase : Union[str, Any] = [ [int(_UpperCamelCase ) for element in line.split(""",""" )] for line in input_file.readlines() ] __UpperCAmelCase : List[str] = len(_UpperCamelCase ) __UpperCAmelCase : str = len(matrix[0] ) __UpperCAmelCase : List[Any] = [[-1 for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): __UpperCAmelCase : int = matrix[i][0] for j in range(1 , _UpperCamelCase ): for i in range(_UpperCamelCase ): __UpperCAmelCase : List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , _UpperCamelCase ): __UpperCAmelCase : Tuple = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): __UpperCAmelCase : List[str] = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"{solution() = }")
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import re def lowerCamelCase ( _UpperCamelCase : str ) -> bool: '''simple docstring''' __UpperCAmelCase : str = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" ) if match := re.search(_UpperCamelCase , _UpperCamelCase ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator('+918827897895'))
320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging UpperCAmelCase : Any = logging.get_logger(__name__) def lowerCamelCase ( ) -> int: '''simple docstring''' __UpperCAmelCase : str = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. __UpperCAmelCase : str = json.loads(_UpperCamelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. __UpperCAmelCase : Optional[int] = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". __UpperCAmelCase : Any = json.loads(_UpperCamelCase ) if not mpi_options.get("""sagemaker_mpi_enabled""" , _UpperCamelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("""smdistributed""" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = field( default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' super().__post_init__() warnings.warn( """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """ """`TrainingArguments` instead.""" , UpperCamelCase , ) @cached_property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' logger.info("""PyTorch: setting up devices""" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( """torch.distributed process group is initialized, but local_rank == -1. """ """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" ) if self.no_cuda: __UpperCAmelCase : Optional[Any] = torch.device("""cpu""" ) __UpperCAmelCase : Union[str, Any] = 0 elif is_sagemaker_model_parallel_available(): __UpperCAmelCase : Tuple = smp.local_rank() __UpperCAmelCase : Optional[Any] = torch.device("""cuda""" , UpperCamelCase ) __UpperCAmelCase : str = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta ) __UpperCAmelCase : Dict = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) ) __UpperCAmelCase : Dict = torch.device("""cuda""" , self.local_rank ) __UpperCAmelCase : Optional[int] = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. 
GPU#1 __UpperCAmelCase : Optional[Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. __UpperCAmelCase : Dict = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta ) __UpperCAmelCase : str = torch.device("""cuda""" , self.local_rank ) __UpperCAmelCase : List[str] = 1 if device.type == "cuda": torch.cuda.set_device(UpperCamelCase ) return device @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return not is_sagemaker_model_parallel_available() @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return False
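# Illustrative environment for the detection helper above (the variable names are
# the real SageMaker ones used in the code; the JSON payloads here are made up):
# SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
# SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# With both set, and the smdistributed module importable, model parallelism is
# considered available.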
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils ) __UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] ) __UpperCAmelCase : Dict = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] ) __UpperCAmelCase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] ) @require_multi_gpu def lowerCamelCase__ ( self : Dict ): '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices.''' ) __UpperCAmelCase : Union[str, Any] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase , env=os.environ.copy() ) @require_multi_gpu def lowerCamelCase__ ( self : str ): '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices.''' ) __UpperCAmelCase : str = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(f'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase , env=os.environ.copy() ) @require_multi_gpu def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase , env=os.environ.copy() ) @require_multi_gpu def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) __UpperCAmelCase : Optional[int] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ): execute_subprocess_async(UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": UpperCAmelCase : Tuple = Accelerator() UpperCAmelCase : Union[str, Any] = (accelerator.state.process_index + 2, 10) UpperCAmelCase : Any = torch.randint(0, 10, shape).to(accelerator.device) UpperCAmelCase : int = '' UpperCAmelCase : List[str] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCAmelCase : Tuple = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCAmelCase : Dict = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." 
if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
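# Toy illustration (assuming 2 processes) of the pad_across_processes behaviour the
# script above asserts: process 0 holds a (2, 10) tensor and process 1 a (3, 10)
# tensor; after padding, both hold shape (3, 10), with process 0's tensor zero-padded
# along dim 0 (at the end by default, at the start when pad_first=True).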
320
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
1
"""simple docstring""" from PIL import Image def lowerCamelCase ( _UpperCamelCase : Image , _UpperCamelCase : int ) -> Image: '''simple docstring''' __UpperCAmelCase : List[str] = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level)) def contrast(_UpperCamelCase : int ) -> int: return int(1_2_8 + factor * (c - 1_2_8) ) return img.point(_UpperCamelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change contrast to 170 UpperCAmelCase : Any = change_contrast(img, 170) cont_img.save('image_data/lena_high_contrast.png', format='png')
320
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
1
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict[Optional[str], Type[Formatter]] = {} UpperCAmelCase : Dict[Optional[str], str] = {} UpperCAmelCase : Dict[Optional[str], Exception] = {} def lowerCamelCase ( _UpperCamelCase : type , _UpperCamelCase : Optional[str] , _UpperCamelCase : Optional[List[str]] = None , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Tuple = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) __UpperCAmelCase : Tuple = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) __UpperCAmelCase : List[Any] = format_type def lowerCamelCase ( _UpperCamelCase : Exception , _UpperCamelCase : Optional[str] , _UpperCamelCase : Optional[List[str]] = None ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __UpperCAmelCase : Optional[Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: UpperCAmelCase : Dict = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: UpperCAmelCase : Optional[Any] = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: UpperCAmelCase : str = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def lowerCamelCase ( _UpperCamelCase : Optional[str] ) -> Optional[str]: '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def lowerCamelCase ( _UpperCamelCase : Optional[str] , **_UpperCamelCase : int ) -> Formatter: '''simple docstring''' __UpperCAmelCase : Dict = get_format_type_from_alias(_UpperCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**_UpperCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( 
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
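A short sketch of how the registry above is exercised; the error branch assumes PyTorch is not installed.

fmt_type = get_format_type_from_alias("np")  # resolves to "numpy"
formatter = get_formatter(fmt_type)          # a NumpyFormatter instance

try:
    get_formatter("torch")  # re-raises the stored ValueError when PyTorch is missing
except ValueError as err:
    print(err)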
320
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
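A hedged sketch of how the dataset and collate helper above are typically wired together. The identifiers in this sample are mangled, so `JsonlDataset`, `collate_fn`, `get_mmimdb_labels` and `get_image_transforms` are assumed stand-ins for the definitions above, and the file path and batch size are placeholders.

from torch.utils.data import DataLoader

# Assumed names for the mangled definitions above; the argument order follows
# the constructor's assignments (data path, tokenizer, labels, max length, transforms).
dataset = JsonlDataset("train.jsonl", tokenizer, get_mmimdb_labels(), 512, get_image_transforms())
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
text, mask, image, img_start, img_end, target = next(iter(loader))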
320
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( A ): """simple docstring""" __a = """cvt""" def __init__( self : Optional[int] , UpperCamelCase : str=3 , UpperCamelCase : Union[str, Any]=[7, 3, 3] , UpperCamelCase : Dict=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Optional[int]=[64, 192, 384] , UpperCamelCase : Optional[Any]=[1, 3, 6] , UpperCamelCase : str=[1, 2, 10] , UpperCamelCase : str=[4.0, 4.0, 4.0] , UpperCamelCase : Any=[0.0, 0.0, 0.0] , UpperCamelCase : Any=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.1] , UpperCamelCase : str=[True, True, True] , UpperCamelCase : Optional[Any]=[False, False, True] , UpperCamelCase : List[Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Tuple=[3, 3, 3] , UpperCamelCase : int=[1, 1, 1] , UpperCamelCase : str=[2, 2, 2] , UpperCamelCase : Optional[Any]=[1, 1, 1] , UpperCamelCase : List[Any]=[1, 1, 1] , UpperCamelCase : List[str]=0.02 , UpperCamelCase : Any=1e-1_2 , **UpperCamelCase : int , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : int = patch_sizes __UpperCAmelCase : Union[str, Any] = patch_stride __UpperCAmelCase : Any = patch_padding __UpperCAmelCase : Union[str, Any] = embed_dim __UpperCAmelCase : Any = num_heads __UpperCAmelCase : Tuple = depth __UpperCAmelCase : str = mlp_ratio __UpperCAmelCase : Dict = attention_drop_rate __UpperCAmelCase : Dict = drop_rate __UpperCAmelCase : Any = drop_path_rate __UpperCAmelCase : Dict = qkv_bias __UpperCAmelCase : Any = cls_token __UpperCAmelCase : Optional[int] = qkv_projection_method __UpperCAmelCase : Optional[int] = kernel_qkv __UpperCAmelCase : str = padding_kv __UpperCAmelCase : Optional[Any] = stride_kv __UpperCAmelCase : List[str] = padding_q __UpperCAmelCase : Union[str, Any] = stride_q __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : Tuple = layer_norm_eps
320
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float ) -> tuple: '''simple docstring''' if inductance <= 0: raise ValueError("""Inductance cannot be 0 or negative""" ) elif capacitance <= 0: raise ValueError("""Capacitance cannot be 0 or negative""" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase : Any = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class lowerCamelCase__ ( A ): """simple docstring""" __a = """deformable_detr""" __a = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : str , UpperCamelCase : int=True , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=300 , UpperCamelCase : List[Any]=1_024 , UpperCamelCase : Tuple=6 , UpperCamelCase : Optional[Any]=1_024 , UpperCamelCase : str=8 , UpperCamelCase : Tuple=6 , UpperCamelCase : Optional[Any]=1_024 , UpperCamelCase : int=8 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Any=True , UpperCamelCase : int="relu" , UpperCamelCase : Tuple=256 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Any=0.02 , UpperCamelCase : Any=1.0 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Dict=False , UpperCamelCase : Tuple="sine" , UpperCamelCase : Union[str, Any]="resnet50" , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[int]=False , UpperCamelCase : int=4 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : int=4 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Union[str, Any]=300 , UpperCamelCase : Dict=False , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Dict=1 , UpperCamelCase : Any=1 , UpperCamelCase : Optional[int]=5 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : str=0.25 , UpperCamelCase : int=False , **UpperCamelCase : int , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[str] = backbone_config.get("""model_type""" ) __UpperCAmelCase : Tuple = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : str = config_class.from_dict(UpperCamelCase ) __UpperCAmelCase : int = use_timm_backbone __UpperCAmelCase : Dict = backbone_config __UpperCAmelCase : Union[str, Any] = num_channels __UpperCAmelCase : int = num_queries __UpperCAmelCase : Any = max_position_embeddings __UpperCAmelCase : Tuple = d_model __UpperCAmelCase : Any = encoder_ffn_dim __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : List[str] = encoder_attention_heads __UpperCAmelCase : Tuple = decoder_ffn_dim __UpperCAmelCase : Optional[int] = decoder_layers __UpperCAmelCase : Union[str, Any] = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Optional[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Optional[Any] = init_std __UpperCAmelCase : Any = init_xavier_std __UpperCAmelCase : Optional[Any] = encoder_layerdrop __UpperCAmelCase : Tuple = auxiliary_loss __UpperCAmelCase : Union[str, Any] = position_embedding_type __UpperCAmelCase : List[Any] = backbone __UpperCAmelCase : int = use_pretrained_backbone __UpperCAmelCase : str = dilation # deformable attributes __UpperCAmelCase : Dict = num_feature_levels __UpperCAmelCase : int = encoder_n_points __UpperCAmelCase : Any = decoder_n_points __UpperCAmelCase : Dict = two_stage __UpperCAmelCase : Optional[Any] = two_stage_num_proposals __UpperCAmelCase : Optional[Any] = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __UpperCAmelCase : Any = class_cost __UpperCAmelCase : str = bbox_cost __UpperCAmelCase : str = giou_cost # Loss coefficients __UpperCAmelCase : str = mask_loss_coefficient __UpperCAmelCase : Optional[int] = dice_loss_coefficient __UpperCAmelCase : List[Any] = bbox_loss_coefficient __UpperCAmelCase : Dict = giou_loss_coefficient __UpperCAmelCase : str = eos_coefficient __UpperCAmelCase : Optional[Any] = focal_alpha __UpperCAmelCase : Optional[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.encoder_attention_heads @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self.d_model def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __UpperCAmelCase : List[str] = self.backbone_config.to_dict() __UpperCAmelCase : Optional[int] = self.__class__.model_type return output
320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
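A short usage sketch for the processor above on a dummy image; `CLIPImageProcessor` is an assumed stand-in for the mangled class name, and the output shape follows from the default 224-pixel resize and crop.

import numpy as np
from transformers import CLIPImageProcessor  # assumed concrete class for the definition above

processor = CLIPImageProcessor()
dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = processor(images=dummy, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)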
320
1
"""simple docstring""" import requests UpperCAmelCase : Optional[int] = 'YOUR API KEY' def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str = giphy_api_key ) -> list: '''simple docstring''' __UpperCAmelCase : List[Any] = """+""".join(query.split() ) __UpperCAmelCase : Union[str, Any] = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}''' __UpperCAmelCase : str = requests.get(_UpperCamelCase ).json()["""data"""] return [gif["url"] for gif in gifs] if __name__ == "__main__": print('\n'.join(get_gifs('space ship')))
320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
1
"""simple docstring""" from functools import reduce UpperCAmelCase : str = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def lowerCamelCase ( _UpperCamelCase : str = N ) -> int: '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda _UpperCamelCase , _UpperCamelCase : str(int(_UpperCamelCase ) * int(_UpperCamelCase ) ) , n[i : i + 1_3] ) ) for i in range(len(_UpperCamelCase ) - 1_2 ) ) if __name__ == "__main__": print(F"{solution() = }")
320
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""transformers""", """torch""", """note_seq"""] def __init__( self : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(self , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def lowerCamelCase__ ( cls : Optional[int] , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def lowerCamelCase__ ( cls : Tuple , *UpperCamelCase : List[str] , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
320
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
1
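A minimal usage sketch for the EfficientNet conversion script above, assuming it was run as a file with --model_name b0 --pytorch_dump_folder_path hf_model --save_model, and that the converted config carries ImageNet id2label entries; the input image path is likewise an assumption.

import torch
from PIL import Image
from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor

# Load the converted checkpoint and its preprocessor back from the dump folder.
preprocessor = EfficientNetImageProcessor.from_pretrained("hf_model")
model = EfficientNetForImageClassification.from_pretrained("hf_model").eval()

image = Image.open("example.jpg").convert("RGB")  # hypothetical input image
inputs = preprocessor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])  # predicted ImageNet class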
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) __UpperCAmelCase : int = DatasetInfosDict.from_directory(_UpperCamelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 4_2 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ), ] , ) def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : DatasetInfo ) -> Any: '''simple docstring''' __UpperCAmelCase : str = str(_UpperCamelCase ) dataset_info.write_to_directory(_UpperCamelCase ) __UpperCAmelCase : int = DatasetInfo.from_directory(_UpperCamelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(_UpperCamelCase , """dataset_info.json""" ) ) def lowerCamelCase ( ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , ) __UpperCAmelCase : int = dataset_info._to_yaml_dict() assert sorted(_UpperCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) __UpperCAmelCase : Union[str, Any] = yaml.safe_dump(_UpperCamelCase ) __UpperCAmelCase : int = yaml.safe_load(_UpperCamelCase ) assert dataset_info_yaml_dict == reloaded def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = DatasetInfo() __UpperCAmelCase : List[Any] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , 
splits=[{"""name""": """train"""}] , download_size=4_2 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=4_2 ), """v2""": DatasetInfo(dataset_size=1_3_3_7 ), } ), ] , ) def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : DatasetInfosDict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = str(_UpperCamelCase ) dataset_infos_dict.write_to_directory(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(_UpperCamelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): __UpperCAmelCase : List[Any] = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml __UpperCAmelCase : str = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(_UpperCamelCase , """README.md""" ) )
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
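The tests above revolve around DatasetInfo (de)serialization; a minimal round-trip sketch through the same public datasets API, with an assumed scratch directory in place of pytest's tmp_path.

import os
from datasets.info import DatasetInfo

os.makedirs("/tmp/dset_info", exist_ok=True)          # assumed scratch directory
info = DatasetInfo(description="foo", citation="bar")
info.write_to_directory("/tmp/dset_info")             # writes dataset_info.json
reloaded = DatasetInfo.from_directory("/tmp/dset_info")
assert reloaded.description == "foo"                  # the description survives the round trip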
"""simple docstring""" UpperCAmelCase : Optional[Any] = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
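The encode/decode pair above reimplements standard Base64; a short cross-check of the expected behaviour against Python's own codec (the sample bytes are arbitrary).

import base64

data = b"Hello, World!"
encoded = base64.b64encode(data)
print(encoded)                            # b'SGVsbG8sIFdvcmxkIQ==' -- 13 bytes pad to '=='
assert base64.b64decode(encoded) == data  # decoding restores the original bytes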
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any]=8 ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __UpperCAmelCase : Optional[int] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=5_1_2 , _UpperCamelCase : List[str]=5_1_2 ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) __UpperCAmelCase : str = np.array(pil_image.convert("""RGB""" ) ) __UpperCAmelCase : Optional[Any] = arr.astype(np.floataa ) / 127.5 - 1 __UpperCAmelCase : List[Any] = np.transpose(_UpperCamelCase , [2, 0, 1] ) __UpperCAmelCase : Union[str, Any] = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 ) return image class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : UNetaDConditionModel , UpperCamelCase : DDPMScheduler , UpperCamelCase : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=UpperCamelCase , scheduler=UpperCamelCase , movq=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : List[str] = min(int(num_inference_steps * strength ) , UpperCamelCase ) __UpperCAmelCase : int = max(num_inference_steps - init_timestep , 0 ) __UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , 
UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Tuple=None ): '''simple docstring''' if not isinstance(UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase )}''' ) __UpperCAmelCase : Optional[Any] = image.to(device=UpperCamelCase , dtype=UpperCamelCase ) __UpperCAmelCase : List[str] = batch_size * num_images_per_prompt if image.shape[1] == 4: __UpperCAmelCase : Tuple = image else: if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(UpperCamelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase ) ] __UpperCAmelCase : Any = torch.cat(UpperCamelCase , dim=0 ) else: __UpperCAmelCase : List[str] = self.movq.encode(UpperCamelCase ).latent_dist.sample(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = self.movq.config.scaling_factor * init_latents __UpperCAmelCase : Any = torch.cat([init_latents] , dim=0 ) __UpperCAmelCase : Any = init_latents.shape __UpperCAmelCase : str = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase ) # get latents __UpperCAmelCase : Union[str, Any] = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = init_latents return latents def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : int=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __UpperCAmelCase : List[str] = torch.device(f'''cuda:{gpu_id}''' ) __UpperCAmelCase : Any = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[int]=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) __UpperCAmelCase : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __UpperCAmelCase : str = None for cpu_offloaded_model in [self.unet, self.movq]: __UpperCAmelCase ,__UpperCAmelCase : Tuple = cpu_offload_with_hook(UpperCamelCase , UpperCamelCase , prev_module_hook=UpperCamelCase ) # We'll offload the last model manually. 
__UpperCAmelCase : List[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase__ ( self : Any ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase ) def __call__( self : List[Any] , UpperCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , UpperCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase : int = 512 , UpperCamelCase : int = 512 , UpperCamelCase : int = 100 , UpperCamelCase : float = 4.0 , UpperCamelCase : float = 0.3 , UpperCamelCase : int = 1 , UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self._execution_device __UpperCAmelCase : List[Any] = guidance_scale > 1.0 if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Any = torch.cat(UpperCamelCase , dim=0 ) __UpperCAmelCase : int = image_embeds.shape[0] if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : int = torch.cat(UpperCamelCase , dim=0 ) if do_classifier_free_guidance: __UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(UpperCamelCase , dim=0 ) __UpperCAmelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(UpperCamelCase , dim=0 ) __UpperCAmelCase : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase ) if not isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[str] = [image] if not all(isinstance(UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f'''Input is in incorrect format: {[type(UpperCamelCase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor''' ) __UpperCAmelCase : List[Any] = torch.cat([prepare_image(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in image] , dim=0 ) __UpperCAmelCase : Optional[int] = image.to(dtype=image_embeds.dtype , device=UpperCamelCase ) __UpperCAmelCase : int = self.movq.encode(UpperCamelCase )["""latents"""] __UpperCAmelCase : Tuple = latents.repeat_interleave(UpperCamelCase , dim=0 ) self.scheduler.set_timesteps(UpperCamelCase , device=UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = self.get_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) __UpperCAmelCase ,__UpperCAmelCase : int = downscale_height_and_width(UpperCamelCase , UpperCamelCase , self.movq_scale_factor ) __UpperCAmelCase : int = self.prepare_latents( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , image_embeds.dtype , UpperCamelCase , UpperCamelCase ) for i, t in enumerate(self.progress_bar(UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __UpperCAmelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __UpperCAmelCase : Any = {"""image_embeds""": image_embeds} __UpperCAmelCase : List[str] = self.unet( sample=UpperCamelCase , timestep=UpperCamelCase , encoder_hidden_states=UpperCamelCase , added_cond_kwargs=UpperCamelCase , return_dict=UpperCamelCase , )[0] if do_classifier_free_guidance: __UpperCAmelCase ,__UpperCAmelCase : List[Any] = noise_pred.split(latents.shape[1] , dim=1 ) __UpperCAmelCase ,__UpperCAmelCase : Any = noise_pred.chunk(2 ) __UpperCAmelCase ,__UpperCAmelCase : Any = variance_pred.chunk(2 ) __UpperCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __UpperCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __UpperCAmelCase : List[Any] = self.scheduler.step( UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase , )[0] # post-processing __UpperCAmelCase : List[str] = self.movq.decode(UpperCamelCase , force_not_quantize=UpperCamelCase )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: __UpperCAmelCase : int = image * 0.5 + 0.5 __UpperCAmelCase : List[Any] = image.clamp(0 , 1 ) __UpperCAmelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __UpperCAmelCase : List[Any] = self.numpy_to_pil(UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase )
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
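Inside the denoising loop of the Kandinsky img2img pipeline above, classifier-free guidance blends the unconditional and conditioned noise predictions; the step isolated on dummy tensors as an illustration.

import torch

noise_pred_uncond = torch.zeros(1, 4, 8, 8)   # prediction without conditioning
noise_pred_text = torch.ones(1, 4, 8, 8)      # prediction with image-embedding conditioning
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.mean().item())  # 4.0 -- the conditioned direction is scaled up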
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowerCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : List[str]=None , UpperCamelCase : int=None ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = list(poly_a or [0] )[:] __UpperCAmelCase : Optional[Any] = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() __UpperCAmelCase : int = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() __UpperCAmelCase : Dict = len(self.polyB ) # Add 0 to make lengths equal a power of 2 __UpperCAmelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform __UpperCAmelCase : Union[str, Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product __UpperCAmelCase : Dict = self.__multiply() def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(UpperCamelCase ) <= 1: return dft[0] # __UpperCAmelCase : Dict = self.c_max_length // 2 while next_ncol > 0: __UpperCAmelCase : List[str] = [[] for i in range(UpperCamelCase )] __UpperCAmelCase : Optional[Any] = self.root**next_ncol # First half of next step __UpperCAmelCase : Dict = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCamelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step __UpperCAmelCase : Union[str, Any] = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCamelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update __UpperCAmelCase : int = new_dft __UpperCAmelCase : Tuple = next_ncol // 2 return dft[0] def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.__dft("""A""" ) __UpperCAmelCase : Any = self.__dft("""B""" ) __UpperCAmelCase : Optional[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT __UpperCAmelCase : Tuple = 2 while next_ncol <= self.c_max_length: __UpperCAmelCase : Dict = [[] for i in range(UpperCamelCase )] __UpperCAmelCase : str = self.root ** (next_ncol // 2) __UpperCAmelCase : int = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update __UpperCAmelCase : List[str] = new_inverse_c next_ncol *= 2 # Unpack __UpperCAmelCase : int = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) __UpperCAmelCase : List[Any] = """B = """ + """ + 
""".join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) __UpperCAmelCase : str = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
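The mpmath-based polynomial class above (before the LED tokenizer tests) implements FFT polynomial multiplication, but its obfuscated identifiers -- for instance the undefined `which` in the DFT method -- keep it from running as printed; a compact numpy reference for the same technique:

import numpy as np

def fft_poly_multiply(a, b):
    n = 1
    while n < len(a) + len(b) - 1:
        n *= 2                                # pad to a power of two, as the class does
    fa = np.fft.fft(a, n)                     # transform both coefficient lists
    fb = np.fft.fft(b, n)
    coeffs = np.fft.ifft(fa * fb).real        # pointwise product, then inverse transform
    return [int(round(c)) for c in coeffs[: len(a) + len(b) - 1]]

print(fft_poly_multiply([1, 2], [3, 4]))      # [3, 10, 8], i.e. (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2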
"""simple docstring""" import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : str=3 , UpperCamelCase : Any=18 , UpperCamelCase : Optional[Any]=30 , UpperCamelCase : str=400 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=True , UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=[0.5, 0.5, 0.5] , ): '''simple docstring''' __UpperCAmelCase : Any = size if size is not None else {"""height""": 18, """width""": 18} __UpperCAmelCase : Any = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Dict = image_size __UpperCAmelCase : List[Any] = min_resolution __UpperCAmelCase : Dict = max_resolution __UpperCAmelCase : Tuple = do_resize __UpperCAmelCase : str = size __UpperCAmelCase : List[Any] = do_normalize __UpperCAmelCase : Optional[int] = image_mean __UpperCAmelCase : int = image_std def lowerCamelCase__ ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = DPTImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[int] = DPTImageProcessingTester(self ) @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """size""" ) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) __UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input __UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __UpperCAmelCase : Any = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input __UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __UpperCAmelCase : int = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input __UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __UpperCAmelCase : Tuple = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
"""simple docstring""" from typing import Any class lowerCamelCase__ : """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : List[str] = data __UpperCAmelCase : List[str] = None class lowerCamelCase__ : """simple docstring""" def __init__( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = None def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.head while temp is not None: print(temp.data , end=""" """ ) __UpperCAmelCase : Optional[int] = temp.next print() def lowerCamelCase__ ( self : int , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : List[Any] = Node(UpperCamelCase ) __UpperCAmelCase : Optional[int] = self.head __UpperCAmelCase : List[Any] = new_node def lowerCamelCase__ ( self : Any , UpperCamelCase : Tuple , UpperCamelCase : List[str] ): '''simple docstring''' if node_data_a == node_data_a: return else: __UpperCAmelCase : Any = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next __UpperCAmelCase : Optional[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Dict = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase ,__UpperCAmelCase : Dict = node_a.data, node_a.data if __name__ == "__main__": UpperCAmelCase : Optional[int] = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('After swapping') ll.print_list()
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel UpperCAmelCase : Optional[Any] = HfApi() UpperCAmelCase : Tuple = {} # fmt: off UpperCAmelCase : Tuple = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) UpperCAmelCase : List[Any] = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) UpperCAmelCase : List[str] = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) UpperCAmelCase : Union[str, Any] = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) UpperCAmelCase : List[str] = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) UpperCAmelCase : List[str] = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) UpperCAmelCase : Tuple = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) UpperCAmelCase : List[str] = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) UpperCAmelCase : Dict = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) UpperCAmelCase : int = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) UpperCAmelCase : Tuple = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) UpperCAmelCase : List[Any] = torch.tensor([ -2.0585, 
-2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) UpperCAmelCase : Union[str, Any] = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) UpperCAmelCase : Optional[int] = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) UpperCAmelCase : Optional[Any] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on UpperCAmelCase : Any = api.list_models(filter='diffusers') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": UpperCAmelCase : Any = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1] print(F"Started running {mod.modelId}!!!") if mod.modelId.startswith('CompVis'): UpperCAmelCase : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet') else: UpperCAmelCase : str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) UpperCAmelCase : List[str] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) UpperCAmelCase : List[Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): UpperCAmelCase : List[Any] = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3 ) print(F"{mod.modelId} has passed successfully!!!")
320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase : Optional[int] = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Path , UpperCamelCase : Union[str, None] = None , UpperCamelCase : Union[List[str], None] = None , UpperCamelCase : Union[str, List[str], None] = None , UpperCamelCase : bool = True , ): '''simple docstring''' __UpperCAmelCase : List[str] = [file for file in os.listdir(UpperCamelCase ) if os.path.isfile(os.path.join(UpperCamelCase , UpperCamelCase ) )] if identifier is not None: __UpperCAmelCase : List[str] = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(UpperCamelCase , UpperCamelCase ): for n_ in n_identifier: __UpperCAmelCase : Optional[int] = [file for file in files if n_ not in file] else: __UpperCAmelCase : int = [file for file in files if n_identifier not in file] __UpperCAmelCase : str = ignore_files or [] ignore_files.append("""__init__.py""" ) __UpperCAmelCase : str = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , UpperCamelCase ) if only_modules: __UpperCAmelCase : List[Any] = file.split(""".""" )[0] try: __UpperCAmelCase : str = getattr(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : int = doctest.DocTestSuite(UpperCamelCase ) __UpperCAmelCase : int = unittest.TextTestRunner().run(UpperCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: __UpperCAmelCase : Optional[Any] = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Dict = Path("""src/transformers""" ) __UpperCAmelCase : Tuple = """modeling""" __UpperCAmelCase : Any = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(UpperCamelCase , identifier=UpperCamelCase , ignore_files=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = Path("""src/transformers""" ) __UpperCAmelCase : Tuple = """tokenization""" self.analyze_directory(UpperCamelCase , identifier=UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = Path("""src/transformers""" ) __UpperCAmelCase : List[str] = """configuration""" self.analyze_directory(UpperCamelCase , identifier=UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = Path("""src/transformers""" ) __UpperCAmelCase : Union[str, Any] = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(UpperCamelCase , n_identifier=UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = Path("""docs/source""" ) __UpperCAmelCase : str = ["""favicon.ico"""] self.analyze_directory(UpperCamelCase , ignore_files=UpperCamelCase , only_modules=UpperCamelCase )
320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
"""simple docstring""" import os def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' with open(os.path.dirname(_UpperCamelCase ) + """/p022_names.txt""" ) as file: __UpperCAmelCase : List[str] = str(file.readlines()[0] ) __UpperCAmelCase : int = names.replace("""\"""" , """""" ).split(""",""" ) names.sort() __UpperCAmelCase : List[Any] = 0 __UpperCAmelCase : Optional[int] = 0 for i, name in enumerate(_UpperCamelCase ): for letter in name: name_score += ord(_UpperCamelCase ) - 6_4 total_score += (i + 1) * name_score __UpperCAmelCase : Optional[Any] = 0 return total_score if __name__ == "__main__": print(solution())
320
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : int , UpperCamelCase : List[str] , UpperCamelCase : List[str]=13 , UpperCamelCase : Union[str, Any]=30 , UpperCamelCase : str=2 , UpperCamelCase : List[Any]=3 , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=32 , UpperCamelCase : Any=5 , UpperCamelCase : Tuple=4 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : List[str]=10 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=2 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = image_size __UpperCAmelCase : Optional[Any] = patch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : int = is_training __UpperCAmelCase : Union[str, Any] = use_labels __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : List[Any] = num_hidden_layers __UpperCAmelCase : Tuple = num_attention_heads __UpperCAmelCase : Dict = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = type_sequence_label_size __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Tuple = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __UpperCAmelCase : Tuple = (image_size // patch_size) ** 2 __UpperCAmelCase : int = num_patches + 2 def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : int = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = DeiTModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = DeiTForMaskedImageModeling(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : Optional[Any] = DeiTForMaskedImageModeling(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : List[str] = self.type_sequence_label_size __UpperCAmelCase : int = DeiTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __UpperCAmelCase : str = 1 __UpperCAmelCase : str = DeiTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : Dict = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : str = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Optional[Any] = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __a = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = DeiTModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def lowerCamelCase__ ( self 
: List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(UpperCamelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Optional[int] = [*signature.parameters.keys()] __UpperCAmelCase : Optional[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : List[str]=False ): '''simple docstring''' __UpperCAmelCase : Any = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return __UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Union[str, Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.train() __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) __UpperCAmelCase : str = model(**UpperCamelCase ).loss loss.backward() def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __UpperCAmelCase : Dict = False __UpperCAmelCase : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports 
inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __UpperCAmelCase : List[Any] = model_class(UpperCamelCase ) model.gradient_checkpointing_enable() model.to(UpperCamelCase ) model.train() __UpperCAmelCase : Optional[Any] = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) __UpperCAmelCase : int = model(**UpperCamelCase ).loss loss.backward() def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[str] = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase ), *get_values(UpperCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ): __UpperCAmelCase : int = problem_type["""title"""] __UpperCAmelCase : Any = problem_type["""num_labels"""] __UpperCAmelCase : List[str] = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.train() __UpperCAmelCase : Union[str, Any] = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if problem_type["num_labels"] > 1: __UpperCAmelCase : Any = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) __UpperCAmelCase : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase ) as warning_list: __UpperCAmelCase : Union[str, Any] = model(**UpperCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Any = DeiTModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def lowerCamelCase ( ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( UpperCamelCase ) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : str = prepare_img() __UpperCAmelCase : Dict = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Dict = model(**UpperCamelCase ) # verify the logits __UpperCAmelCase : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) __UpperCAmelCase : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = self.default_image_processor __UpperCAmelCase : Dict = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : str = inputs.pixel_values.to(UpperCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=13 , UpperCamelCase : str=30 , UpperCamelCase : List[Any]=2 , UpperCamelCase : Tuple=3 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=32 , UpperCamelCase : Dict=2 , UpperCamelCase : Tuple=4 , UpperCamelCase : Optional[Any]=37 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : str=0.1 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Tuple=3 , UpperCamelCase : Tuple=0.6 , UpperCamelCase : List[Any]=None , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : Optional[Any] = batch_size __UpperCAmelCase : str = image_size __UpperCAmelCase : List[str] = patch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Union[str, Any] = is_training __UpperCAmelCase : Union[str, Any] = use_labels __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Any = num_hidden_layers __UpperCAmelCase : Tuple = num_attention_heads __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : Tuple = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = type_sequence_label_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : Tuple = mask_ratio __UpperCAmelCase : Tuple = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) __UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2 __UpperCAmelCase : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Any = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act 
, hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase__ ( self : int , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : List[str] = TFViTMAEModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = TFViTMAEForPreTraining(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = model(UpperCamelCase , training=UpperCamelCase ) # expected sequence length = num_patches __UpperCAmelCase : Optional[int] = (self.image_size // self.patch_size) ** 2 __UpperCAmelCase : Dict = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images __UpperCAmelCase : Union[str, Any] = 1 __UpperCAmelCase : Optional[int] = TFViTMAEForPreTraining(UpperCamelCase ) __UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : Dict = model(UpperCamelCase , training=UpperCamelCase ) __UpperCAmelCase : int = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ((__UpperCAmelCase) ,(__UpperCAmelCase) ,(__UpperCAmelCase)) : Any = config_and_inputs __UpperCAmelCase : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __a = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} __a = False __a = False __a = False __a = False def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = TFViTMAEModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' pass def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Tuple = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , tf.keras.layers.Layer ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Dict = model_class(UpperCamelCase ) __UpperCAmelCase : Tuple = inspect.signature(model.call ) # 
signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Optional[int] = [*signature.parameters.keys()] __UpperCAmelCase : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' np.random.seed(2 ) __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) __UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = model(UpperCamelCase , noise=UpperCamelCase ) __UpperCAmelCase : Tuple = copy.deepcopy(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : int = model(**UpperCamelCase , noise=UpperCamelCase ) __UpperCAmelCase : str = outputs_dict[0].numpy() __UpperCAmelCase : str = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' np.random.seed(2 ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = int((config.image_size // config.patch_size) ** 2 ) __UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(UpperCamelCase : Any ): __UpperCAmelCase : Any = {} for k, v in inputs_dict.items(): if tf.is_tensor(UpperCamelCase ): __UpperCAmelCase : List[Any] = v.numpy() else: __UpperCAmelCase : Optional[int] = np.array(UpperCamelCase ) return inputs_np_dict for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(UpperCamelCase ) __UpperCAmelCase : int = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = prepare_numpy_arrays(UpperCamelCase ) __UpperCAmelCase : Any = model(UpperCamelCase , noise=UpperCamelCase ) __UpperCAmelCase : Tuple = model(**UpperCamelCase , noise=UpperCamelCase ) self.assert_outputs_same(UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] ): '''simple docstring''' np.random.seed(2 ) __UpperCAmelCase : List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) __UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __UpperCAmelCase : int = tf.constant(UpperCamelCase ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument __UpperCAmelCase : List[str] = tf_noise super().check_pt_tf_models(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' np.random.seed(2 ) __UpperCAmelCase ,__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Union[str, Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(UpperCamelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(UpperCamelCase , UpperCamelCase ),) if isinstance(UpperCamelCase , UpperCamelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(UpperCamelCase , """_keras_serializable""" , UpperCamelCase ) } __UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 ) __UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __UpperCAmelCase : List[Any] = tf.convert_to_tensor(UpperCamelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: __UpperCAmelCase : Tuple = main_layer_class(UpperCamelCase ) __UpperCAmelCase : Tuple = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } __UpperCAmelCase : Optional[int] = tf.keras.Model(UpperCamelCase , outputs=main_layer(UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = model(UpperCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase : Any = os.path.join(UpperCamelCase , """keras_model.h5""" ) model.save(UpperCamelCase ) __UpperCAmelCase : Tuple = tf.keras.models.load_model( UpperCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(UpperCamelCase , tf.keras.Model ) __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.assert_outputs_same(UpperCamelCase , UpperCamelCase ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' np.random.seed(2 ) __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) __UpperCAmelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __UpperCAmelCase : Any = model_class(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[int] = model(UpperCamelCase , noise=UpperCamelCase ) if model_class.__name__ == "TFViTMAEModel": __UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy() __UpperCAmelCase : Dict = 0 else: __UpperCAmelCase : Dict = outputs.logits.numpy() __UpperCAmelCase : Dict = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase , saved_model=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = model_class.from_pretrained(UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase , noise=UpperCamelCase ) if model_class.__name__ == "TFViTMAEModel": __UpperCAmelCase : Tuple = after_outputs["""last_hidden_state"""].numpy() __UpperCAmelCase : List[Any] = 0 
else: __UpperCAmelCase : str = after_outputs["""logits"""].numpy() __UpperCAmelCase : str = 0 __UpperCAmelCase : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase , 1e-5 ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' np.random.seed(2 ) __UpperCAmelCase ,__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : int = int((config.image_size // config.patch_size) ** 2 ) __UpperCAmelCase : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class(UpperCamelCase ) __UpperCAmelCase : int = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase , noise=UpperCamelCase ) __UpperCAmelCase : Dict = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(UpperCamelCase ) __UpperCAmelCase : List[Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config __UpperCAmelCase : List[str] = model_class.from_config(model.config ) __UpperCAmelCase : Any = new_model(UpperCamelCase ) # Build model new_model.set_weights(model.get_weights() ) __UpperCAmelCase : str = new_model(UpperCamelCase , noise=UpperCamelCase ) self.assert_outputs_same(UpperCamelCase , UpperCamelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' pass @slow def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Any = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(UpperCamelCase ) def lowerCamelCase ( ) -> Dict: '''simple docstring''' __UpperCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' np.random.seed(2 ) __UpperCAmelCase : Dict = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) __UpperCAmelCase : Optional[int] = self.default_image_processor __UpperCAmelCase : List[Any] = prepare_img() __UpperCAmelCase : Dict = image_processor(images=UpperCamelCase , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) __UpperCAmelCase : int = ViTMAEConfig() __UpperCAmelCase : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) __UpperCAmelCase : Any = np.random.uniform(size=(1, num_patches) ) # forward pass __UpperCAmelCase : Optional[int] = model(**UpperCamelCase , noise=UpperCamelCase ) # verify the logits __UpperCAmelCase : Tuple = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, 
-0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 )
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict ) -> str: '''simple docstring''' __UpperCAmelCase : str = [1] for i in range(2 , _UpperCamelCase ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Optional[Any] = list(range(_UpperCamelCase ) ) # Find permutation while factorials: __UpperCAmelCase : List[Any] = factorials.pop() __UpperCAmelCase ,__UpperCAmelCase : str = divmod(_UpperCamelCase , _UpperCamelCase ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float ) -> float: '''simple docstring''' if mass < 0: raise ValueError("""The mass of a body cannot be negative""" ) return 0.5 * mass * abs(_UpperCamelCase ) * abs(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
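The style_context of the record above is transformers' guarded lazy-import boilerplate. Reduced to its skeleton, the pattern looks roughly like this (module and symbol names are illustrative, and `_LazyModule` is a transformers-internal helper, so treat the exact call as an assumption):

```python
import sys
from typing import TYPE_CHECKING

from transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# symbols that are always importable
_import_structure = {"configuration_electra": ["ElectraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch-only symbols are simply never registered
else:
    _import_structure["modeling_electra"] = ["ElectraModel"]

if TYPE_CHECKING:
    # static type checkers resolve the real imports
    from transformers.models.electra import ElectraConfig, ElectraModel  # noqa: F401
else:
    # at runtime the module is swapped for a lazy proxy that imports on attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
```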
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int ) -> str: '''simple docstring''' if isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError("""'float' object cannot be interpreted as an integer""" ) if isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError("""'str' object cannot be interpreted as an integer""" ) if num == 0: return "0b0" __UpperCAmelCase : Optional[Any] = False if num < 0: __UpperCAmelCase : int = True __UpperCAmelCase : Optional[int] = -num __UpperCAmelCase : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(_UpperCamelCase ) for e in binary ) return "0b" + "".join(str(_UpperCamelCase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
1
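Restored, the converter in this record's code field can be cross-checked against Python's built-in `bin()`. A small self-check sketch (the helper name is assumed, not from the record):

```python
def binary_conversion_check(num: int) -> None:
    # rebuild the "0b..." string by repeated halving, as the record does
    negative = num < 0
    n, bits = abs(num), []
    while n > 0:
        bits.insert(0, n % 2)
        n >>= 1
    result = ("-0b" if negative else "0b") + ("".join(str(b) for b in bits) or "0")
    assert result == bin(num), (result, bin(num))


for value in (-37, 0, 1, 200):
    binary_conversion_check(value)
```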
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase : Dict = logging.get_logger(__name__) UpperCAmelCase : List[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class lowerCamelCase__ ( A ): """simple docstring""" __a = """roberta""" def __init__( self : List[Any] , UpperCamelCase : List[str]=50_265 , UpperCamelCase : Optional[Any]=768 , UpperCamelCase : List[Any]=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : Tuple=3_072 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Optional[Any]=512 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : int=1e-1_2 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Tuple=0 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : int="absolute" , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : str = vocab_size __UpperCAmelCase : str = hidden_size __UpperCAmelCase : Any = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : Tuple = hidden_act __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : Any = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : int = type_vocab_size __UpperCAmelCase : str = initializer_range __UpperCAmelCase : Tuple = layer_norm_eps __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : str = use_cache __UpperCAmelCase : Optional[int] = classifier_dropout class lowerCamelCase__ ( A ): """simple docstring""" @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": __UpperCAmelCase : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
320
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
1
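In the style_context of the record above, the digit-masked `hashlib.mda` is presumably `hashlib.md5`; the helper just fingerprints a mask image so the test fixtures stay short. A hedged reconstruction:

```python
import hashlib

import numpy as np
from PIL import Image


def hashimage(image: Image.Image) -> str:
    # short, deterministic fingerprint of the raw pixel bytes
    return hashlib.md5(image.tobytes()).hexdigest()[:10]


def mask_to_test_readable(mask: Image.Image) -> dict:
    arr = np.array(mask)
    return {"hash": hashimage(mask), "shape": arr.shape}
```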
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 UpperCAmelCase : List[Any] = { # 1536-bit 5: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AACAA68FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199' + 'FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08' + '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B' + '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9' + 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6' + '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8' + 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C' + '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718' + '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D' + '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D' + 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226' + '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC' + 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26' + '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB' + '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2' + '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127' + 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406' + 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918' + 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151' + '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03' + 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F' + 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B' + 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632' + '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E' + '6DCC4024FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD' + 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831' + '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B' + 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF' + '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6' + 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3' + '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328' + '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C' + 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE' + '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4' + '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300' + '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568' + 
'3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9' + '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B' + '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A' + '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36' + '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1' + 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92' + '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47' + '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71' + '60C980DD98EDD3DFFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, } class lowerCamelCase__ : """simple docstring""" def __init__( self : str , UpperCamelCase : int = 14 ): '''simple docstring''' if group not in primes: raise ValueError("""Unsupported Group""" ) __UpperCAmelCase : Union[str, Any] = primes[group]["""prime"""] __UpperCAmelCase : Union[str, Any] = primes[group]["""generator"""] __UpperCAmelCase : Tuple = int(hexlify(urandom(32 ) ) , base=16 ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return hex(self.__private_key )[2:] def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Tuple = pow(self.generator , self.__private_key , self.prime ) return hex(UpperCamelCase )[2:] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int ): '''simple docstring''' return ( 2 <= key <= self.prime - 2 and pow(UpperCamelCase , (self.prime - 1) // 2 , self.prime ) == 1 ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[int] = int(UpperCamelCase , base=16 ) if not self.is_valid_public_key(UpperCamelCase ): raise ValueError("""Invalid public key""" ) __UpperCAmelCase : Dict = pow(UpperCamelCase , self.__private_key , self.prime ) return shaaaa(str(UpperCamelCase ).encode() ).hexdigest() @staticmethod def lowerCamelCase__ ( UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' return ( 2 <= remote_public_key_str <= prime - 2 and pow(UpperCamelCase , (prime - 1) // 2 , UpperCamelCase ) == 1 ) @staticmethod def lowerCamelCase__ ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : int = 14 ): '''simple docstring''' __UpperCAmelCase : List[str] = int(UpperCamelCase , base=16 ) __UpperCAmelCase : Tuple = int(UpperCamelCase , base=16 ) __UpperCAmelCase : Any = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(UpperCamelCase , UpperCamelCase ): raise ValueError("""Invalid public key""" ) __UpperCAmelCase : Tuple = pow(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return shaaaa(str(UpperCamelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
1
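The code field of that record implements RFC 3526 Diffie-Hellman; the digit-masked `shaaaa` is presumably `sha256`. The exchange itself reduces to three modular exponentiations with `pow()`. A toy-sized sketch (the 32-bit prime here is purely illustrative; the RFC groups start at 1536 bits):

```python
import hashlib
from os import urandom

prime, generator = 0xFFFFFFFB, 2  # toy modulus (2**32 - 5 is prime); never use in practice

alice_secret = int.from_bytes(urandom(4), "big")
bob_secret = int.from_bytes(urandom(4), "big")

alice_public = pow(generator, alice_secret, prime)
bob_public = pow(generator, bob_secret, prime)

# both sides derive the same value: g**(a*b) mod p
shared_a = pow(bob_public, alice_secret, prime)
shared_b = pow(alice_public, bob_secret, prime)
assert shared_a == shared_b

key = hashlib.sha256(str(shared_a).encode()).hexdigest()
```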
"""simple docstring""" import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig UpperCAmelCase : List[Any] = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } UpperCAmelCase : Any = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" __a = """maskformer""" __a = {"""hidden_size""": """mask_feature_size"""} __a = ["""resnet""", """swin"""] __a = ["""detr"""] def __init__( self : Tuple , UpperCamelCase : int = 256 , UpperCamelCase : int = 256 , UpperCamelCase : float = 0.1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[Dict] = None , UpperCamelCase : Optional[Dict] = None , UpperCamelCase : float = 0.02 , UpperCamelCase : float = 1.0 , UpperCamelCase : float = 1.0 , UpperCamelCase : float = 1.0 , UpperCamelCase : float = 20.0 , UpperCamelCase : Optional[bool] = None , **UpperCamelCase : List[str] , ): '''simple docstring''' if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k __UpperCAmelCase : List[Any] = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Any = backbone_config.pop("""model_type""" ) __UpperCAmelCase : int = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : Any = config_class.from_dict(UpperCamelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
''' f'''Supported model types: {",".join(self.backbones_supported )}''' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 __UpperCAmelCase : Any = DetrConfig() else: # verify that the decoder is supported __UpperCAmelCase : Optional[int] = ( decoder_config.pop("""model_type""" ) if isinstance(UpperCamelCase , UpperCamelCase ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f'''Transformer Decoder {decoder_type} not supported, please use one of''' f''' {",".join(self.decoders_supported )}''' ) if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = CONFIG_MAPPING[decoder_type] __UpperCAmelCase : List[Any] = config_class.from_dict(UpperCamelCase ) __UpperCAmelCase : Any = backbone_config __UpperCAmelCase : Dict = decoder_config # main feature dimension for the model __UpperCAmelCase : List[str] = fpn_feature_size __UpperCAmelCase : List[str] = mask_feature_size # initializer __UpperCAmelCase : List[str] = init_std __UpperCAmelCase : str = init_xavier_std # Hungarian matcher && loss __UpperCAmelCase : Optional[int] = cross_entropy_weight __UpperCAmelCase : str = dice_weight __UpperCAmelCase : List[str] = mask_weight __UpperCAmelCase : Tuple = use_auxiliary_loss __UpperCAmelCase : Union[str, Any] = no_object_weight __UpperCAmelCase : Optional[int] = output_auxiliary_logits __UpperCAmelCase : Optional[int] = self.decoder_config.encoder_attention_heads __UpperCAmelCase : List[Any] = self.decoder_config.num_hidden_layers super().__init__(**UpperCamelCase ) @classmethod def lowerCamelCase__ ( cls : Optional[Any] , UpperCamelCase : PretrainedConfig , UpperCamelCase : PretrainedConfig , **UpperCamelCase : Any ): '''simple docstring''' return cls( backbone_config=UpperCamelCase , decoder_config=UpperCamelCase , **UpperCamelCase , ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : List[str] = self.backbone_config.to_dict() __UpperCAmelCase : Dict = self.decoder_config.to_dict() __UpperCAmelCase : List[Any] = self.__class__.model_type return output
320
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1
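The MaskFormer record composes a Swin backbone config with a DETR decoder config through a classmethod; in released transformers that classmethod is named `from_backbone_and_decoder_configs`, which the usage sketch below assumes:

```python
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

backbone = SwinConfig(image_size=384, embed_dim=128, depths=[2, 2, 18, 2])
decoder = DetrConfig()

config = MaskFormerConfig.from_backbone_and_decoder_configs(
    backbone_config=backbone, decoder_config=decoder
)
# nested configs round-trip through the serialized dict form
assert config.to_dict()["backbone_config"]["model_type"] == "swin"
```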
"""simple docstring""" import argparse import json import subprocess def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : str = [] __UpperCAmelCase : Any = ( f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"''' """ https://api.github.com/repos/huggingface/transformers/actions/runners""" ) __UpperCAmelCase : List[Any] = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE ) __UpperCAmelCase : Optional[int] = output.stdout.decode("""utf-8""" ) __UpperCAmelCase : Dict = json.loads(_UpperCamelCase ) __UpperCAmelCase : int = status["""runners"""] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_UpperCamelCase ) # save the result so we can report them on Slack with open("""offline_runners.txt""" , """w""" ) as fp: fp.write(json.dumps(_UpperCamelCase ) ) if len(_UpperCamelCase ) > 0: __UpperCAmelCase : Tuple = """\n""".join([x["""name"""] for x in offline_runners] ) raise ValueError(f'''The following runners are offline:\n{failed}''' ) if __name__ == "__main__": def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Optional[int]: '''simple docstring''' return values.split(""",""" ) UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--target_runners', default=None, type=list_str, required=True, help='Comma-separated list of runners to check status.', ) parser.add_argument( '--token', default=None, type=str, required=True, help='A token that has actions:read permission.' ) UpperCAmelCase : Union[str, Any] = parser.parse_args() get_runner_status(args.target_runners, args.token)
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
1
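The style_context of that record is the minimum-subset-sum-difference DP, but the obfuscation stripped the assignment targets (`dp[i][0]`, `dp[0][j]`, and `dp[i][j]` all became bare placeholders). A reconstruction of what the original presumably computes, under the standard formulation of that algorithm:

```python
def find_min_diff(arr: list[int]) -> int:
    n, s = len(arr), sum(arr)
    # dp[i][j] is True when some subset of arr[:i] sums to exactly j
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the best split puts the achievable sum closest to s/2 on one side
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s


assert find_min_diff([1, 6, 11, 5]) == 1  # {1, 5, 6} vs {11}
```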
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class lowerCamelCase__ ( A ): """simple docstring""" __a = """M-CLIP""" def __init__( self : Tuple , UpperCamelCase : str=1_024 , UpperCamelCase : Any=768 , **UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : str = transformerDimSize __UpperCAmelCase : List[str] = imageDimSize super().__init__(**UpperCamelCase ) class lowerCamelCase__ ( A ): """simple docstring""" __a = MCLIPConfig def __init__( self : Any , UpperCamelCase : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , *UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : str = XLMRobertaModel(UpperCamelCase ) __UpperCAmelCase : Optional[int] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.transformer(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )[0] __UpperCAmelCase : int = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(UpperCamelCase ), embs
320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
1
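The forward pass in that M-CLIP record is attention-mask-weighted mean pooling followed by a linear projection. Standalone, in plain torch (shapes assumed, no transformers dependency):

```python
import torch


def masked_mean_pool(embs: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # embs: (batch, seq, dim); attention_mask: (batch, seq) of 0/1
    summed = (embs * attention_mask.unsqueeze(2)).sum(dim=1)
    counts = attention_mask.sum(dim=1)[:, None]  # (batch, 1) token counts
    return summed / counts


pooled = masked_mean_pool(torch.ones(2, 5, 8), torch.tensor([[1, 1, 1, 0, 0], [1] * 5]))
assert pooled.shape == (2, 8)
```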
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : Tuple = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowerCamelCase__ ( A ): """simple docstring""" __a = """roformer""" def __init__( self : Tuple , UpperCamelCase : Any=50_000 , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=768 , UpperCamelCase : Tuple=12 , UpperCamelCase : int=12 , UpperCamelCase : Dict=3_072 , UpperCamelCase : str="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[int]=1_536 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : Optional[int]=0 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Any=True , **UpperCamelCase : Dict , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : List[str] = hidden_size if embedding_size is None else embedding_size __UpperCAmelCase : str = hidden_size __UpperCAmelCase : List[str] = num_hidden_layers __UpperCAmelCase : List[Any] = num_attention_heads __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : List[str] = type_vocab_size __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Tuple = layer_norm_eps __UpperCAmelCase : int = rotary_value __UpperCAmelCase : Optional[Any] = use_cache class lowerCamelCase__ ( A ): """simple docstring""" @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": __UpperCAmelCase : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """sequence"""} __UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
1
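For the Horner record, a quick numeric cross-check of the two evaluation orders (direct power sum versus Horner's nested form), self-contained so it runs on its own:

```python
def horner(poly, x):
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
direct = sum(c * x**i for i, c in enumerate(poly))  # 5*x^2 + 9.3*x^3 + 7*x^4
assert abs(horner(poly, x) - direct) < 1e-6         # both evaluate to ~79800.0
```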
"""simple docstring""" import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase : int = 16 UpperCAmelCase : Union[str, Any] = 32 def lowerCamelCase ( _UpperCamelCase : Accelerator , _UpperCamelCase : int = 1_6 ) -> str: '''simple docstring''' __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : List[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_UpperCamelCase , max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCAmelCase : List[str] = datasets.map( _UpperCamelCase , batched=_UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCAmelCase : str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_UpperCamelCase : str ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCAmelCase : Union[str, Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCAmelCase : List[Any] = 1_6 elif accelerator.mixed_precision != "no": __UpperCAmelCase : Dict = 8 else: __UpperCAmelCase : Optional[int] = None return tokenizer.pad( _UpperCamelCase , padding="""longest""" , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
__UpperCAmelCase : Optional[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase , drop_last=_UpperCamelCase ) __UpperCAmelCase : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Any ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCAmelCase : Tuple = config["""lr"""] __UpperCAmelCase : int = int(config["""num_epochs"""] ) __UpperCAmelCase : List[Any] = int(config["""seed"""] ) __UpperCAmelCase : Any = int(config["""batch_size"""] ) __UpperCAmelCase : List[Any] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation __UpperCAmelCase : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __UpperCAmelCase : Tuple = batch_size // MAX_GPU_BATCH_SIZE __UpperCAmelCase : Optional[Any] = MAX_GPU_BATCH_SIZE set_seed(_UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = get_dataloaders(_UpperCamelCase , _UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCAmelCase : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer __UpperCAmelCase : List[str] = AdamW(params=model.parameters() , lr=_UpperCamelCase ) # Instantiate scheduler __UpperCAmelCase : Optional[int] = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __UpperCAmelCase : Tuple = model(**_UpperCamelCase ) __UpperCAmelCase : str = outputs.loss __UpperCAmelCase : int = loss / gradient_accumulation_steps accelerator.backward(_UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __UpperCAmelCase : str = model(**_UpperCamelCase ) __UpperCAmelCase : int = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=_UpperCamelCase , references=_UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _UpperCamelCase ) def lowerCamelCase ( ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=_UpperCamelCase , default=_UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) __UpperCAmelCase : Optional[int] = parser.parse_args() __UpperCAmelCase : Union[str, Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": main()
320
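# Editor's note: a minimal sketch (not part of the dataset sample above) of the
# gradient-accumulation pattern that training script uses: the loss is divided by the
# number of accumulation steps and the optimizer only steps every
# `gradient_accumulation_steps` batches, so the accumulated gradient approximates a
# large-batch update. All names below are illustrative assumptions, not the sample's API.
import torch

def train_with_accumulation(model, optimizer, dataloader, gradient_accumulation_steps=4):
    model.train()
    for step, (inputs, labels) in enumerate(dataloader):
        loss = torch.nn.functional.cross_entropy(model(inputs), labels)
        # Scale the loss so the summed gradients match one large-batch gradient.
        (loss / gradient_accumulation_steps).backward()
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()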
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
1
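# Editor's note: a small numpy sketch of how `prepare_pegasus_inputs_dict` in the sample
# above derives its masks: the encoder mask marks non-pad tokens, while the decoder mask
# always keeps the first (decoder start) position regardless of padding. Function and
# argument names here are illustrative.
import numpy as np

def build_masks(input_ids, decoder_input_ids, pad_token_id=0):
    attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),  # keep decoder start token
            np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask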
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_UpperCamelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_UpperCamelCase ): return [[videos]] raise ValueError(f'''Could not make batched video from {videos}''' ) class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Dict , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Dict = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : List[Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : Union[str, Any] = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : Any = do_resize __UpperCAmelCase : int = size __UpperCAmelCase : int = do_center_crop __UpperCAmelCase : Optional[int] = crop_size __UpperCAmelCase : Any = resample __UpperCAmelCase : List[str] = do_rescale __UpperCAmelCase : Dict = rescale_factor __UpperCAmelCase : List[Any] = do_normalize __UpperCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self : str , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" in size: __UpperCAmelCase : List[str] = get_resize_output_image_size(UpperCamelCase , size["""shortest_edge"""] , default_to_square=UpperCamelCase ) elif "height" in size and "width" in size: __UpperCAmelCase : List[str] = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : int = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[int] , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : str , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and (size is None or resample is None): raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays.
__UpperCAmelCase : Optional[int] = to_numpy_array(UpperCamelCase ) if do_resize: __UpperCAmelCase : List[str] = self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) if do_center_crop: __UpperCAmelCase : int = self.center_crop(UpperCamelCase , size=UpperCamelCase ) if do_rescale: __UpperCAmelCase : Tuple = self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) if do_normalize: __UpperCAmelCase : Optional[int] = self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) __UpperCAmelCase : List[str] = to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) return image def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Any = resample if resample is not None else self.resample __UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std __UpperCAmelCase : str = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Tuple = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) __UpperCAmelCase : Optional[Any] = make_batched(UpperCamelCase ) __UpperCAmelCase : str = [ [ self._preprocess_image( image=UpperCamelCase , do_resize=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , do_center_crop=UpperCamelCase , crop_size=UpperCamelCase , do_rescale=UpperCamelCase , rescale_factor=UpperCamelCase , do_normalize=UpperCamelCase , image_mean=UpperCamelCase , image_std=UpperCamelCase , data_format=UpperCamelCase , ) for img in video ] for video in videos ] __UpperCAmelCase : Optional[int] = {"""pixel_values""": videos} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
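# Editor's note: a sketch of the "shortest_edge" resizing rule the video processor above
# relies on: scale the image so its shorter side equals `shortest_edge`, preserving the
# aspect ratio. This is an illustrative re-derivation (the rounding mode is an
# assumption), not the library's `get_resize_output_image_size` implementation.
def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)

# e.g. shortest_edge_size(480, 640, 224) -> (224, 299)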
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
1
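# Editor's note: a minimal sketch of the weight-layout conversion the EfficientNet
# script above performs. TF/Keras stores conv kernels as (H, W, in, out) and depthwise
# kernels as (H, W, channels, multiplier), while PyTorch expects (out, in, H, W); hence
# the permutes. Function names are illustrative.
import numpy as np
import torch

def tf_conv_kernel_to_torch(kernel: np.ndarray) -> torch.Tensor:
    return torch.from_numpy(kernel).permute(3, 2, 0, 1)  # HWIO -> OIHW

def tf_depthwise_kernel_to_torch(kernel: np.ndarray) -> torch.Tensor:
    # (H, W, C, M) -> (C, M, H, W); matches PyTorch's grouped-conv weight when M == 1.
    return torch.from_numpy(kernel).permute(2, 3, 0, 1)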
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
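# Editor's note: a sketch of the input tiling used by the multiple-choice test above:
# each (batch, seq) tensor is expanded to (batch, num_choices, seq) by repeating it once
# per answer choice, mirroring the tf.expand_dims/tf.tile calls in the sample.
import tensorflow as tf

def tile_for_choices(input_ids: tf.Tensor, num_choices: int) -> tf.Tensor:
    return tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))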
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
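# Editor's note: a minimal re-implementation sketch of the dummy-object pattern in the
# sample above: a placeholder class that raises a helpful ImportError whenever it is
# instantiated without the optional backend installed. The `require_backend` helper is
# an assumption standing in for transformers' `requires_backends`.
import importlib.util

def require_backend(name: str, backend: str) -> None:
    if importlib.util.find_spec(backend) is None:
        raise ImportError(f"{name} requires the '{backend}' package; please install it.")

class KerasNLPDummy:
    def __init__(self, *args, **kwargs):
        require_backend(type(self).__name__, "keras_nlp")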
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = StableDiffusionXLImgaImgPipeline __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} __a = PipelineTesterMixin.required_optional_params - {"""latents"""} __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __a = IMAGE_TO_IMAGE_IMAGE_PARAMS __a = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) __UpperCAmelCase : Optional[Any] = EulerDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) __UpperCAmelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __UpperCAmelCase : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , ) __UpperCAmelCase : str = CLIPTextModel(UpperCamelCase ) __UpperCAmelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCamelCase ) __UpperCAmelCase : List[str] = CLIPTextModelWithProjection(UpperCamelCase ) __UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any=0 ): '''simple docstring''' __UpperCAmelCase : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __UpperCAmelCase : int = image / 2 + 0.5 if str(UpperCamelCase ).startswith("""mps""" ): 
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(UpperCamelCase ) else: __UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __UpperCAmelCase : int = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : List[str] = self.get_dummy_components() __UpperCAmelCase : Optional[Any] = StableDiffusionXLImgaImgPipeline(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = sd_pipe.to(UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase ) __UpperCAmelCase : int = sd_pipe(**UpperCamelCase ).images __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase : str = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' pass def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**UpperCamelCase ) __UpperCAmelCase : str = sd_pipe.to(UpperCamelCase ) __UpperCAmelCase : List[str] = sd_pipe.to(UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase ) # forward without prompt embeds __UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase ) __UpperCAmelCase : Dict = 3 * ["""this is a negative prompt"""] __UpperCAmelCase : Optional[Any] = negative_prompt __UpperCAmelCase : str = 3 * [inputs["""prompt"""]] __UpperCAmelCase : Tuple = sd_pipe(**UpperCamelCase ) __UpperCAmelCase : List[str] = output.images[0, -3:, -3:, -1] # forward with prompt embeds __UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase ) __UpperCAmelCase : Optional[int] = 3 * ["""this is a negative prompt"""] __UpperCAmelCase : int = 3 * [inputs.pop("""prompt""" )] ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : List[str] = sd_pipe.encode_prompt(UpperCamelCase , negative_prompt=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = sd_pipe( **UpperCamelCase , prompt_embeds=UpperCamelCase , negative_prompt_embeds=UpperCamelCase , pooled_prompt_embeds=UpperCamelCase , negative_pooled_prompt_embeds=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Tuple="cpu" , UpperCamelCase : Union[str, Any]=torch.floataa , 
UpperCamelCase : Union[str, Any]=0 ): '''simple docstring''' __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __UpperCAmelCase : str = np.random.RandomState(UpperCamelCase ).standard_normal((1, 4, 64, 64) ) __UpperCAmelCase : List[str] = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Optional[int] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : int = self.get_inputs(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = pipe(**UpperCamelCase ).images __UpperCAmelCase : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __UpperCAmelCase : Optional[int] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : Tuple , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[Any] , *UpperCamelCase : Dict , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : int , *UpperCamelCase : Tuple , **UpperCamelCase : Tuple ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : str , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : str , *UpperCamelCase : str , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : Any , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Tuple , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[Any] , *UpperCamelCase : str , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : int , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : str , *UpperCamelCase : Any , **UpperCamelCase : List[str] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : int , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : Tuple ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[str] , *UpperCamelCase : int , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : str , *UpperCamelCase : List[Any] , **UpperCamelCase : List[str] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : Any , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : List[str] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[str] , *UpperCamelCase : int , **UpperCamelCase : Tuple ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( 
metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : List[str] , *UpperCamelCase : Any , **UpperCamelCase : Any ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Optional[int] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : int , *UpperCamelCase : Tuple , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : int , *UpperCamelCase : List[Any] , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : List[str] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[str] , *UpperCamelCase : Dict , **UpperCamelCase : Any ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : List[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : int ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Any , *UpperCamelCase : Dict , **UpperCamelCase : Tuple ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[Any] , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : Tuple , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[str] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : str ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : str , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : int ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Optional[int] , *UpperCamelCase : Optional[int] , **UpperCamelCase : str ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""flax"""] def __init__( self : List[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : List[Any] , *UpperCamelCase : int , **UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Optional[Any] , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) class lowerCamelCase__ ( metaclass=A ): """simple 
docstring""" __a = ["""flax"""] def __init__( self : Any , *UpperCamelCase : List[str] , **UpperCamelCase : Tuple ): '''simple docstring''' requires_backends(self , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] ) @classmethod def lowerCamelCase__ ( cls : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["""flax"""] )
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class lowerCamelCase__ ( A ): """simple docstring""" __a = """EncodecFeatureExtractor""" __a = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[int] = self.feature_extractor __UpperCAmelCase : List[Any] = False def lowerCamelCase__ ( self : List[str] , UpperCamelCase : int=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : List[Any]=True ): '''simple docstring''' return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase , language=UpperCamelCase , no_timestamps=UpperCamelCase ) def __call__( self : Any , *UpperCamelCase : str , **UpperCamelCase : Optional[int] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = kwargs.pop("""audio""" , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = kwargs.pop("""sampling_rate""" , UpperCamelCase ) __UpperCAmelCase : List[Any] = kwargs.pop("""text""" , UpperCamelCase ) if len(UpperCamelCase ) > 0: __UpperCAmelCase : Dict = args[0] __UpperCAmelCase : Optional[int] = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if text is not None: __UpperCAmelCase : Union[str, Any] = self.tokenizer(UpperCamelCase , **UpperCamelCase ) if audio is not None: __UpperCAmelCase : List[Any] = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCAmelCase : str = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: __UpperCAmelCase : List[Any] = audio_inputs["""padding_mask"""] return inputs def lowerCamelCase__ ( self : int , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Any = kwargs.pop("""audio""" , UpperCamelCase ) __UpperCAmelCase : List[str] = kwargs.pop("""padding_mask""" , UpperCamelCase ) if len(UpperCamelCase ) > 0: __UpperCAmelCase : List[str] = args[0] __UpperCAmelCase : List[Any] = args[1:] if audio_values is not None: return self._decode_audio(UpperCamelCase , padding_mask=UpperCamelCase ) else: return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : str , *UpperCamelCase : Tuple , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Optional = None ): '''simple docstring''' __UpperCAmelCase : List[Any] = to_numpy(UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = audio_values.shape if padding_mask is None: return list(UpperCamelCase ) __UpperCAmelCase : List[Any] = to_numpy(UpperCamelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCAmelCase : List[Any] = seq_len - padding_mask.shape[-1] __UpperCAmelCase : Optional[int] = 1 - self.feature_extractor.padding_value __UpperCAmelCase : Optional[Any] = np.pad(UpperCamelCase , ((0, 0), (0, 
difference)) , """constant""" , constant_values=UpperCamelCase ) __UpperCAmelCase : Tuple = audio_values.tolist() for i in range(UpperCamelCase ): __UpperCAmelCase : Dict = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCAmelCase : List[Any] = sliced_audio.reshape(UpperCamelCase , -1 ) return audio_values
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance UpperCAmelCase : int = 6378137.0 UpperCAmelCase : Tuple = 6356752.314245 UpperCAmelCase : List[str] = 637_8137 def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : str = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __UpperCAmelCase : int = atan((1 - flattening) * tan(radians(_UpperCamelCase ) ) ) __UpperCAmelCase : Any = atan((1 - flattening) * tan(radians(_UpperCamelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __UpperCAmelCase : List[str] = haversine_distance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values __UpperCAmelCase : List[Any] = (b_lata + b_lata) / 2 __UpperCAmelCase : Optional[int] = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __UpperCAmelCase : Any = (sin(_UpperCamelCase ) ** 2) * (cos(_UpperCamelCase ) ** 2) __UpperCAmelCase : List[str] = cos(sigma / 2 ) ** 2 __UpperCAmelCase : Optional[int] = (sigma - sin(_UpperCamelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __UpperCAmelCase : List[str] = (cos(_UpperCamelCase ) ** 2) * (sin(_UpperCamelCase ) ** 2) __UpperCAmelCase : List[Any] = sin(sigma / 2 ) ** 2 __UpperCAmelCase : int = (sigma + sin(_UpperCamelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Optional[int] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = self.dummy_uncond_unet __UpperCAmelCase : List[Any] = ScoreSdeVeScheduler() __UpperCAmelCase : int = ScoreSdeVePipeline(unet=UpperCamelCase , scheduler=UpperCamelCase ) sde_ve.to(UpperCamelCase ) sde_ve.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) __UpperCAmelCase : Any = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=UpperCamelCase ).images __UpperCAmelCase : Dict = torch.manual_seed(0 ) __UpperCAmelCase : List[Any] = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=UpperCamelCase , return_dict=UpperCamelCase )[ 0 ] __UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1] __UpperCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = """google/ncsnpp-church-256""" __UpperCAmelCase : str = UNetaDModel.from_pretrained(UpperCamelCase ) __UpperCAmelCase : int = ScoreSdeVeScheduler.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = ScoreSdeVePipeline(unet=UpperCamelCase , scheduler=UpperCamelCase ) sde_ve.to(UpperCamelCase ) sde_ve.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : Tuple = torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=UpperCamelCase ).images __UpperCAmelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __UpperCAmelCase : Optional[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
"""simple docstring""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class lowerCamelCase__ ( A ): """simple docstring""" __a = """MCTCTFeatureExtractor""" __a = """AutoTokenizer""" def __init__( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : int ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Tuple = self.feature_extractor __UpperCAmelCase : List[str] = False def __call__( self : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*UpperCamelCase , **UpperCamelCase ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" ) __UpperCAmelCase : List[str] = kwargs.pop("""raw_speech""" ) else: __UpperCAmelCase : Optional[int] = kwargs.pop("""audio""" , UpperCamelCase ) __UpperCAmelCase : int = kwargs.pop("""sampling_rate""" , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = kwargs.pop("""text""" , UpperCamelCase ) if len(UpperCamelCase ) > 0: __UpperCAmelCase : int = args[0] __UpperCAmelCase : Dict = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: __UpperCAmelCase : Optional[int] = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase ) if text is not None: __UpperCAmelCase : Optional[Any] = self.tokenizer(UpperCamelCase , **UpperCamelCase ) if text is None: return inputs elif audio is None: return encodings else: __UpperCAmelCase : List[str] = encodings["""input_ids"""] return inputs def lowerCamelCase__ ( self : Dict , *UpperCamelCase : Any , **UpperCamelCase : str ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Dict = kwargs.pop("""input_features""" , UpperCamelCase ) __UpperCAmelCase : Any = kwargs.pop("""labels""" , UpperCamelCase ) if len(UpperCamelCase ) > 0: __UpperCAmelCase : Optional[Any] = args[0] __UpperCAmelCase : str = args[1:] if input_features is not None: __UpperCAmelCase : Any = self.feature_extractor.pad(UpperCamelCase , *UpperCamelCase , **UpperCamelCase ) if labels is not None: __UpperCAmelCase : List[Any] = self.tokenizer.pad(UpperCamelCase , **UpperCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: __UpperCAmelCase : Any = labels["""input_ids"""] return input_features def lowerCamelCase__ ( self : Dict , *UpperCamelCase : Any , **UpperCamelCase : str ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @contextmanager def lowerCamelCase__ ( self : Dict ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[Any] = self.tokenizer yield __UpperCAmelCase : Tuple = self.feature_extractor __UpperCAmelCase : int = False
320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Any = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'SEW_PRETRAINED_MODEL_ARCHIVE_LIST', 'SEWForCTC', 'SEWForSequenceClassification', 'SEWModel', 'SEWPreTrainedModel', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
"""simple docstring""" import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask UpperCAmelCase : Any = logging.getLogger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" __a = """token-classification""" def __init__( self : Dict , UpperCamelCase : Dict ): '''simple docstring''' if type(UpperCamelCase ) == dict: __UpperCAmelCase : Tuple = Namespace(**UpperCamelCase ) __UpperCAmelCase : int = import_module("""tasks""" ) try: __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase , hparams.task_type ) __UpperCAmelCase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) __UpperCAmelCase : List[Any] = self.token_classification_task.get_labels(hparams.labels ) __UpperCAmelCase : Union[str, Any] = CrossEntropyLoss().ignore_index super().__init__(UpperCamelCase , len(self.labels ) , self.mode ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : Optional[int] ): '''simple docstring''' return self.model(**UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": __UpperCAmelCase : Union[str, Any] = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids __UpperCAmelCase : Optional[Any] = self(**UpperCamelCase ) __UpperCAmelCase : Optional[int] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : str = self.hparams for mode in ["train", "dev", "test"]: __UpperCAmelCase : Optional[Any] = self._feature_file(UpperCamelCase ) if os.path.exists(UpperCamelCase ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , UpperCamelCase ) __UpperCAmelCase : Optional[int] = torch.load(UpperCamelCase ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __UpperCAmelCase : Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase ) __UpperCAmelCase : str = self.token_classification_task.convert_examples_to_features( UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , UpperCamelCase ) torch.save(UpperCamelCase , 
UpperCamelCase )

    def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : bool = False ):
        '''simple docstring'''
        __UpperCAmelCase : str = self._feature_file(UpperCamelCase )
        logger.info("""Loading features from cached file %s""" , UpperCamelCase )
        __UpperCAmelCase : Optional[int] = torch.load(UpperCamelCase )
        __UpperCAmelCase : Optional[int] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        __UpperCAmelCase : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            __UpperCAmelCase : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            __UpperCAmelCase : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        __UpperCAmelCase : str = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , batch_size=UpperCamelCase
        )

    def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] ):
        '''simple docstring'''
        # Compute validation
        __UpperCAmelCase : Union[str, Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            __UpperCAmelCase : Union[str, Any] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don't use token_type_ids
        __UpperCAmelCase : int = self(**UpperCamelCase )
        __UpperCAmelCase ,__UpperCAmelCase : List[str] = outputs[:2]
        __UpperCAmelCase : Optional[int] = logits.detach().cpu().numpy()
        __UpperCAmelCase : Any = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] ):
        '''simple docstring'''
        __UpperCAmelCase : Union[str, Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
        __UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        __UpperCAmelCase : int = np.argmax(UpperCamelCase , axis=2 )
        __UpperCAmelCase : Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        __UpperCAmelCase : Tuple = dict(enumerate(self.labels ) )
        __UpperCAmelCase : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
        __UpperCAmelCase : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        __UpperCAmelCase : Union[str, Any] = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(UpperCamelCase , UpperCamelCase ),
            """precision""": precision_score(UpperCamelCase , UpperCamelCase ),
            """recall""": recall_score(UpperCamelCase , UpperCamelCase ),
            """f1""": fa_score(UpperCamelCase , UpperCamelCase ),
        }
        __UpperCAmelCase : List[Any] = dict(results.items() )
        __UpperCAmelCase : Optional[int] = results
        return ret, preds_list, out_label_list

    def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Tuple ):
        '''simple docstring'''
        __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = self._eval_end(UpperCamelCase )
        __UpperCAmelCase : Dict = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def lowerCamelCase__ ( self : int , UpperCamelCase : int ):
        '''simple docstring'''
        __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[str] = self._eval_end(UpperCamelCase )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        __UpperCAmelCase : str = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def lowerCamelCase__ ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] ):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(UpperCamelCase , UpperCamelCase )
        parser.add_argument(
            """--task_type""" , default="""NER""" , type=UpperCamelCase , help="""Task type to fine tune in training (e.g. NER, POS, etc)"""
        )
        parser.add_argument(
            """--max_seq_length""" ,
            default=128 ,
            type=UpperCamelCase ,
            help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) ,
        )
        parser.add_argument(
            """--labels""" ,
            default="""""" ,
            type=UpperCamelCase ,
            help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" ,
        )
        parser.add_argument(
            """--gpus""" ,
            default=0 ,
            type=UpperCamelCase ,
            help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,
        )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets"""
        )
        return parser


if __name__ == "__main__":
    UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    UpperCAmelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
    UpperCAmelCase : Union[str, Any] = parser.parse_args()
    UpperCAmelCase : List[Any] = NERTransformer(args)
    UpperCAmelCase : List[str] = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        UpperCAmelCase : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        UpperCAmelCase : str = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
320
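As context for the row above, a minimal self-contained sketch of the label-realignment loop in `_eval_end`: padded positions are skipped so the seqeval-style metrics only see real tokens. The `-100` sentinel and the tag names below are assumptions for illustration, not values taken from this file.

import numpy as np

pad_token_label_id = -100          # assumed sentinel for ignored positions
label_map = {0: "O", 1: "B-PER", 2: "I-PER"}

out_label_ids = np.array([[1, 2, 0, pad_token_label_id]])
preds = np.array([[1, 0, 0, pad_token_label_id]])

out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != pad_token_label_id:
            out_label_list[i].append(label_map[int(out_label_ids[i, j])])
            preds_list[i].append(label_map[int(preds[i, j])])

print(out_label_list)  # [['B-PER', 'I-PER', 'O']]
print(preds_list)      # [['B-PER', 'O', 'O']]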
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
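A hedged sketch of the workflow the test above guards: convert with optimum's BetterTransformer for fast inference, then revert before saving so the checkpoint stays in the canonical format (saving without reverting is exactly what the second test expects to raise). The checkpoint is the test's tiny model; `optimum` must be installed, and the output directory name is made up.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

model = model.to_bettertransformer()        # swap in BetterTransformer modules
out = model.generate(**tokenizer("This is me", return_tensors="pt"))

model = model.reverse_bettertransformer()   # restore canonical modules first
model.save_pretrained("tiny-t5-checkpoint")  # hypothetical output directory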
"""simple docstring""" from __future__ import annotations class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : Optional[int]=None ): '''simple docstring''' __UpperCAmelCase : Any = data __UpperCAmelCase : Optional[Any] = None def __repr__( self : List[str] ): '''simple docstring''' __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = self while temp: string_rep.append(f'''{temp.data}''' ) __UpperCAmelCase : Union[str, Any] = temp.next return "->".join(UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : list ) -> Tuple: '''simple docstring''' if not elements_list: raise Exception("""The Elements List is empty""" ) __UpperCAmelCase : List[Any] = Node(elements_list[0] ) for i in range(1 , len(_UpperCamelCase ) ): __UpperCAmelCase : List[str] = Node(elements_list[i] ) __UpperCAmelCase : List[Any] = current.next return head def lowerCamelCase ( _UpperCamelCase : Node ) -> None: '''simple docstring''' if head_node is not None and isinstance(_UpperCamelCase , _UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' from doctest import testmod testmod() __UpperCAmelCase : int = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print("""Linked List:""" ) print(_UpperCamelCase ) print("""Elements in Reverse:""" ) print_reverse(_UpperCamelCase ) if __name__ == "__main__": main()
320
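A quick usage sketch of the row above, assuming the functions' de-obfuscated names (`make_linked_list`, `print_reverse`), which the bodies themselves reference. The recursion prints the tail before the head, hence the reversed order.

head = make_linked_list([1, 2, 3])
print(head)          # 1->2->3, via the node class's __repr__
print_reverse(head)  # prints 3, then 2, then 1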
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def lowerCamelCase ( _UpperCamelCase : Union[str, Any]="ro" , _UpperCamelCase : Dict="en" , _UpperCamelCase : Dict="wmt16" , _UpperCamelCase : Optional[int]=None ) -> None: '''simple docstring''' try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("""run pip install datasets""" ) __UpperCAmelCase : str = f'''{src_lang}-{tgt_lang}''' print(f'''Converting {dataset}-{pair}''' ) __UpperCAmelCase : Dict = datasets.load_dataset(_UpperCamelCase , _UpperCamelCase ) if save_dir is None: __UpperCAmelCase : List[Any] = f'''{dataset}-{pair}''' __UpperCAmelCase : Tuple = Path(_UpperCamelCase ) save_dir.mkdir(exist_ok=_UpperCamelCase ) for split in ds.keys(): print(f'''Splitting {split} with {ds[split].num_rows} records''' ) # to save to val.source, val.target like summary datasets __UpperCAmelCase : List[str] = """val""" if split == """validation""" else split __UpperCAmelCase : str = save_dir.joinpath(f'''{fn}.source''' ) __UpperCAmelCase : str = save_dir.joinpath(f'''{fn}.target''' ) __UpperCAmelCase : Any = src_path.open("""w+""" ) __UpperCAmelCase : Dict = tgt_path.open("""w+""" ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): __UpperCAmelCase : Optional[int] = x["""translation"""] src_fp.write(ex[src_lang] + """\n""" ) tgt_fp.write(ex[tgt_lang] + """\n""" ) print(f'''Saved {dataset} dataset to {save_dir}''' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
320
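Since the row above ends in `fire.Fire(download_wmt_dataset)`, it is meant to be driven from the command line; a hypothetical invocation (the script's file name is assumed, and the output layout follows the code's own split naming):

# equivalent to: python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")
# writes train.source/train.target, val.source/val.target, test.source/test.target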
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler') class lowerCamelCase__ : """simple docstring""" def __init__( self : str , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : bool = True , UpperCamelCase : bool = False ): '''simple docstring''' __UpperCAmelCase : Optional[int] = scheduler __UpperCAmelCase : Any = optimizers if isinstance(UpperCamelCase , (list, tuple) ) else [optimizers] __UpperCAmelCase : Optional[int] = split_batches __UpperCAmelCase : int = step_with_optimizer __UpperCAmelCase : Optional[int] = GradientState() def lowerCamelCase__ ( self : Dict , *UpperCamelCase : Optional[int] , **UpperCamelCase : int ): '''simple docstring''' if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*UpperCamelCase , **UpperCamelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*UpperCamelCase , **UpperCamelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step __UpperCAmelCase : Tuple = AcceleratorState().num_processes for _ in range(UpperCamelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , """total_steps""" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*UpperCamelCase , **UpperCamelCase ) else: self.scheduler.step(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self.scheduler.get_last_lr() def lowerCamelCase__ ( self : int ): '''simple docstring''' return self.scheduler.state_dict() def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[Any] ): '''simple docstring''' self.scheduler.load_state_dict(UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return self.scheduler.get_lr() def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase : Tuple , **UpperCamelCase : int ): '''simple docstring''' return self.scheduler.print_lr(*UpperCamelCase , **UpperCamelCase )
320
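A toy illustration, not Accelerate itself, of the stepping rule in `step()` above: with `split_batches=False`, one optimizer update consumes `num_processes` batches, so the wrapped scheduler is advanced that many times to keep its schedule aligned with data consumed. The process count and step counts below are arbitrary.

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

param = torch.nn.Parameter(torch.zeros(1))
opt = SGD([param], lr=1.0)
sched = StepLR(opt, step_size=8, gamma=0.1)

num_processes, split_batches = 4, False   # assumed multi-process setup
for _ in range(2):                        # two "training steps"
    opt.step()
    for _ in range(1 if split_batches else num_processes):
        sched.step()

print(sched.last_epoch)  # 8: the scheduler advanced num_processes times per step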
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
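A spot check of the indexing convention in the row above (Project Euler 40): `constant` is 0-indexed, so `constant[9]` is the 10th digit d10 of Champernowne's 0.123456789101112…

digits = "".join(str(i) for i in range(1, 100))  # enough digits for these checks
assert digits[0] == "1"   # d1 = 1
assert digits[9] == "1"   # d10 = 1, the first digit of "10"
assert digits[11] == "1"  # d12 = 1, the first digit of "11"
print(int(digits[0]) * int(digits[9]))  # 1, the first two factors of the product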
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict ) -> Any: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = tmp_path / """cache""" __UpperCAmelCase : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase : Union[str, Any] = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Any = tmp_path / """cache""" __UpperCAmelCase : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __UpperCAmelCase : List[str] = features.copy() if features else default_expected_features __UpperCAmelCase : List[str] = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase : Optional[int] = JsonDatasetReader(_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}, ] , ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Any: '''simple docstring''' __UpperCAmelCase : Tuple = tmp_path / """cache""" __UpperCAmelCase : Any = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""} __UpperCAmelCase : List[Any] = features.copy() if features else default_expected_features __UpperCAmelCase : Tuple = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase : Tuple = JsonDatasetReader(_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read() assert isinstance(_UpperCamelCase , _UpperCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype 
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] ) -> Dict: '''simple docstring''' __UpperCAmelCase : List[Any] = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""} __UpperCAmelCase : str = features.copy() __UpperCAmelCase : str = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase : Union[str, Any] = tmp_path / """cache""" __UpperCAmelCase : Tuple = JsonDatasetReader(_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read() assert isinstance(_UpperCamelCase , _UpperCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = tmp_path / """cache""" __UpperCAmelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __UpperCAmelCase : Optional[int] = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , split=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase , _UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> Tuple: '''simple docstring''' if issubclass(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : int = jsonl_path elif issubclass(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : str = [jsonl_path] __UpperCAmelCase : Any = tmp_path / """cache""" __UpperCAmelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __UpperCAmelCase : List[Any] = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase , _UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str=("train",) ) -> List[Any]: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) for split in splits: __UpperCAmelCase : List[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = tmp_path / """cache""" __UpperCAmelCase : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase : int = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read() _check_json_datasetdict(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", 
"""col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = tmp_path / """cache""" __UpperCAmelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __UpperCAmelCase : Optional[Any] = features.copy() if features else default_expected_features __UpperCAmelCase : str = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase : List[str] = JsonDatasetReader({"""train""": jsonl_path} , features=_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_json_datasetdict(_UpperCamelCase , _UpperCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int ) -> Optional[int]: '''simple docstring''' if split: __UpperCAmelCase : Dict = {split: jsonl_path} else: __UpperCAmelCase : Dict = """train""" __UpperCAmelCase : str = {"""train""": jsonl_path, """test""": jsonl_path} __UpperCAmelCase : Optional[int] = tmp_path / """cache""" __UpperCAmelCase : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __UpperCAmelCase : Tuple = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read() _check_json_datasetdict(_UpperCamelCase , _UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase ( _UpperCamelCase : Tuple ) -> int: '''simple docstring''' return json.load(_UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : Tuple ) -> int: '''simple docstring''' return [json.loads(_UpperCamelCase ) for line in buffer] class lowerCamelCase__ : """simple docstring""" @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) __UpperCAmelCase : Optional[Any] = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( """orient, container, keys, len_at""" , [ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] , ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Any ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , 
orient=UpperCamelCase ).write() buffer.seek(0 ) __UpperCAmelCase : int = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase : Optional[Any] = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( """orient, container, keys, len_at""" , [ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase : int = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] ): '''simple docstring''' with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / f'''test.json.{extension}''' __UpperCAmelCase : Union[str, Any] = str(shared_datadir / f'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase , """rb""" , compression="""infer""" ) as f: __UpperCAmelCase : Tuple = f.read() with fsspec.open(UpperCamelCase , """rb""" , compression="""infer""" ) as f: __UpperCAmelCase : List[Any] = f.read() assert exported_content == original_content
320
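A hedged end-to-end sketch of the reader/writer pair these tests exercise; the path, cache directory, and column values are illustrative.

from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
JsonDatasetWriter(ds, "demo.jsonl", lines=True).write()  # one JSON object per line
reloaded = JsonDatasetReader("demo.jsonl", cache_dir="cache").read()
print(reloaded.column_names)  # ['col_1', 'col_2', 'col_3']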
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple: '''simple docstring''' if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
320
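The row above solves the mass-action law n·p = n_i² for whichever of the three quantities is passed as zero. A worked check of the arithmetic, with descriptive stand-in names rather than the obfuscated ones:

n, p = 25.0, 100.0          # electron and hole concentrations (arbitrary units)
n_i = (n * p) ** 0.5        # the intrinsic branch: n_i = sqrt(n * p) = 50
assert n_i == 50.0
assert n_i**2 / n == p      # and the hole branch inverts the same relation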
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
1
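What the mixin in the row above buys in practice, sketched with an assumed checkpoint: `from_pretrained` resolves `scheduler_config.json` from the subfolder, and `compatibles` lists the scheduler classes that can share that config.

from diffusers import DDIMScheduler

sched = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
print([cls.__name__ for cls in sched.compatibles])  # e.g. DDPMScheduler, PNDMScheduler, ...
sched.save_pretrained("my-scheduler")  # writes scheduler_config.json (path assumed)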
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __UpperCAmelCase : Dict = Vector() def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(UpperCamelCase ) , """(0,0,0,0,0,1)""" ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = Vector([1, 2, 3, 4] ) self.assertEqual(len(UpperCamelCase ) , 4 ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : int = Vector([1, 2] ) __UpperCAmelCase : Optional[Any] = Vector([1, 2, 3, 4, 5] ) __UpperCAmelCase : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __UpperCAmelCase : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Vector([1, 2, 3] ) __UpperCAmelCase : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = Vector([1, 2, 3] ) __UpperCAmelCase : List[str] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : int = Vector([1, 2, 3] ) __UpperCAmelCase : Any = Vector([2, -1, 4] ) # for test of dot product __UpperCAmelCase : int = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def lowerCamelCase__ ( self : str ): '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = Vector([1, 2, 3] ) __UpperCAmelCase : Dict = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , UpperCamelCase , UpperCamelCase ) ) , """(3,4,7)""" ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Optional[int] = Vector([1, 0, 0, 0, 0, 0] ) __UpperCAmelCase : str = x.copy() self.assertEqual(str(UpperCamelCase ) , str(UpperCamelCase ) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(UpperCamelCase ) , """(0,1,0)""" ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __UpperCAmelCase : 
Optional[int] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __UpperCAmelCase : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Tuple = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __UpperCAmelCase : Optional[Any] = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __UpperCAmelCase : Tuple = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __UpperCAmelCase : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
320
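A hand expansion, along the first row, of the 3x3 determinant the determinant test above asserts:

# det([[1, 2, 3], [2, 4, 5], [6, 7, 8]])
#   = 1*(4*8 - 5*7) - 2*(2*8 - 5*6) + 3*(2*7 - 4*6)
print(1 * (32 - 35) - 2 * (16 - 30) + 3 * (14 - 24))  # -5, matching the test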
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
1
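The slow tests above boil down to this usage pattern; the checkpoint and image URL are the ones in the tests, and since SAM-huge weights are large this is genuinely slow.

from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
)
print(len(outputs["masks"]), outputs["scores"][0])  # many masks; scores descend, as in the expected values above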
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase : Dict = logging.get_logger(__name__) UpperCAmelCase : int = { 'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json', # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCamelCase__ ( A , A ): """simple docstring""" __a = """dinat""" __a = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : str=64 , UpperCamelCase : List[Any]=[3, 4, 6, 5] , UpperCamelCase : Tuple=[2, 4, 8, 16] , UpperCamelCase : Optional[int]=7 , UpperCamelCase : Union[str, Any]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCamelCase : Dict=3.0 , UpperCamelCase : List[Any]=True , UpperCamelCase : int=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : str=0.1 , UpperCamelCase : str="gelu" , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : str=1e-5 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Tuple , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = patch_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Union[str, Any] = embed_dim __UpperCAmelCase : Optional[Any] = depths __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = num_heads __UpperCAmelCase : str = kernel_size __UpperCAmelCase : Optional[Any] = dilations __UpperCAmelCase : str = mlp_ratio __UpperCAmelCase : Dict = qkv_bias __UpperCAmelCase : List[Any] = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : List[str] = drop_path_rate __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Tuple = layer_norm_eps __UpperCAmelCase : Tuple = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase : Any = int(embed_dim * 2 ** (len(UpperCamelCase ) - 1) ) __UpperCAmelCase : Union[str, Any] = layer_scale_init_value __UpperCAmelCase : Union[str, Any] = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(UpperCamelCase ) + 1 )] __UpperCAmelCase ,__UpperCAmelCase : List[str] = get_aligned_output_features_output_indices( out_features=UpperCamelCase , out_indices=UpperCamelCase , stage_names=self.stage_names )
320
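The derived channel dimension set near the end of the config above, evaluated for the defaults `embed_dim=64` and four stages (`depths` has four entries):

embed_dim, num_stages = 64, 4
hidden_size = int(embed_dim * 2 ** (num_stages - 1))
print(hidden_size)  # 512 -- the channel width after the last stage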
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
1
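What `POOLING_BREAKDOWN` in the row above feeds into, sketched concretely: for `num_image_embeds=3` the pool is `AdaptiveAvgPool2d((3, 1))`, which together with the flatten/transpose in the forward turns a ResNet feature map into three 2048-dim image tokens. Shapes below are illustrative.

import torch

feats = torch.randn(2, 2048, 7, 7)                   # B x C x H x W from the ResNet trunk
pooled = torch.nn.AdaptiveAvgPool2d((3, 1))(feats)   # B x 2048 x 3 x 1
tokens = torch.flatten(pooled, start_dim=2).transpose(1, 2).contiguous()
print(tokens.shape)  # torch.Size([2, 3, 2048])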
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[str] = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1