Dataset schema (column, dtype, value range):

    code                     string   length 87 – 55.2k
    code_codestyle           int64    0 – 349
    style_context            string   length 135 – 49.1k
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
"""simple docstring""" def _snake_case ( lowercase__ : int = 1 , lowercase__ : int = 1_0_0_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :str = 1 lowerCAmelCase_ :str = 0 for divide_by_number in range(lowercase__ , digit + 1 ): lowerCAmelCase_ :list[int] = [] lowerCAmelCase_ :int = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(lowercase__ ): lowerCAmelCase_ :Tuple = len(lowercase__ ) lowerCAmelCase_ :int = divide_by_number else: has_been_divided.append(lowercase__ ) lowerCAmelCase_ :Optional[int] = now_divide * 1_0 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 84
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
style_context_codestyle: 84
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['DeiTFeatureExtractor'] __UpperCAmelCase = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 84
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
style_context_codestyle: 84
label: 1
"""simple docstring""" from typing import Dict, Optional import numpy as np import datasets __UpperCAmelCase = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' __UpperCAmelCase = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' __UpperCAmelCase = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def _snake_case ( lowercase__ : int , lowercase__ : str , lowercase__ : Dict , lowercase__ : bool , lowercase__ : Optional[Dict[int, int]] = None , lowercase__ : bool = False , ) -> Tuple: '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): lowerCAmelCase_ :List[str] = new_id # turn into Numpy arrays lowerCAmelCase_ :Any = np.array(lowercase__ ) lowerCAmelCase_ :int = np.array(lowercase__ ) if reduce_labels: lowerCAmelCase_ :Union[str, Any] = 2_5_5 lowerCAmelCase_ :List[Any] = label - 1 lowerCAmelCase_ :int = 2_5_5 lowerCAmelCase_ :List[str] = label != ignore_index lowerCAmelCase_ :Union[str, Any] = np.not_equal(lowercase__ , lowercase__ ) lowerCAmelCase_ :Dict = pred_label[mask] lowerCAmelCase_ :Dict = np.array(lowercase__ )[mask] lowerCAmelCase_ :Optional[Any] = pred_label[pred_label == label] lowerCAmelCase_ :List[str] = np.histogram(lowercase__ , bins=lowercase__ , range=(0, num_labels - 1) )[0] lowerCAmelCase_ :Dict = np.histogram(lowercase__ , bins=lowercase__ , range=(0, num_labels - 1) )[0] lowerCAmelCase_ :str = np.histogram(lowercase__ , bins=lowercase__ , range=(0, num_labels - 1) )[0] lowerCAmelCase_ :Optional[Any] = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def _snake_case ( lowercase__ : List[str] , lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : bool , lowercase__ : Optional[Dict[int, int]] = None , lowercase__ : bool = False , ) -> int: '''simple docstring''' lowerCAmelCase_ :int = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase_ :int = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase_ :List[str] = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase_ :str = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(lowercase__ , lowercase__ ): lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[str] = intersect_and_union( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : bool , lowercase__ : Optional[int] = None , lowercase__ : Optional[Dict[int, int]] = None , lowercase__ : bool = False , ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = total_intersect_and_union( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # compute metrics lowerCAmelCase_ :List[Any] = {} lowerCAmelCase_ :List[str] = total_area_intersect.sum() / total_area_label.sum() lowerCAmelCase_ :Optional[int] = total_area_intersect / total_area_union lowerCAmelCase_ :str = total_area_intersect / total_area_label lowerCAmelCase_ :Union[str, Any] = np.nanmean(lowercase__ ) lowerCAmelCase_ :Optional[Any] = np.nanmean(lowercase__ ) lowerCAmelCase_ :Tuple = all_acc lowerCAmelCase_ :List[str] = iou lowerCAmelCase_ 
:Optional[Any] = acc if nan_to_num is not None: lowerCAmelCase_ :str = {metric: np.nan_to_num(lowercase__ , nan=lowercase__ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): def __lowerCAmelCase ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), } ) , reference_urls=[ """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py""" ] , ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A = None , __A = None , __A = False , ) -> int: lowerCAmelCase_ :Dict = mean_iou( results=__A , gt_seg_maps=__A , num_labels=__A , ignore_index=__A , nan_to_num=__A , label_map=__A , reduce_labels=__A , ) return iou_result
code_codestyle: 84
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
style_context_codestyle: 84
label: 1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[str] = "timm_backbone" def __init__( self , __A=None , __A=3 , __A=True , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(**__A ) lowerCAmelCase_ :int = backbone lowerCAmelCase_ :Union[str, Any] = num_channels lowerCAmelCase_ :int = features_only lowerCAmelCase_ :str = use_pretrained_backbone lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :Optional[Any] = out_indices if out_indices is not None else (-1,)
code_codestyle: 84
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: 
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / 
"""dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ 
:Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) 
return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) 
/ """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
style_context_codestyle: 84
label: 1
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } __UpperCAmelCase = { 'b0': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 2_24, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 2_40, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 14_08, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 2_60, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 15_36, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 3_00, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 17_92, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 3_80, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 20_48, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 4_56, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 23_04, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 5_28, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 25_60, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 6_00, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = EfficientNetConfig() lowerCAmelCase_ :str = CONFIG_MAP[model_name]["""hidden_dim"""] lowerCAmelCase_ :List[Any] = CONFIG_MAP[model_name]["""width_coef"""] lowerCAmelCase_ :Any = CONFIG_MAP[model_name]["""depth_coef"""] lowerCAmelCase_ :str = CONFIG_MAP[model_name]["""image_size"""] lowerCAmelCase_ :Optional[int] = CONFIG_MAP[model_name]["""dropout_rate"""] lowerCAmelCase_ :Dict = CONFIG_MAP[model_name]["""dw_padding"""] lowerCAmelCase_ :Dict = """huggingface/label-files""" lowerCAmelCase_ :Optional[int] = """imagenet-1k-id2label.json""" lowerCAmelCase_ :List[str] = 1_0_0_0 lowerCAmelCase_ :Any = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCAmelCase_ :int = {int(lowercase__ ): v for k, v in idalabel.items()} lowerCAmelCase_ :Any = idalabel lowerCAmelCase_ :Union[str, Any] = {v: k for k, v in idalabel.items()} return config def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :List[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = CONFIG_MAP[model_name]["""image_size"""] lowerCAmelCase_ :str = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , ) return preprocessor def _snake_case ( lowercase__ : str ) -> 
List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] lowerCAmelCase_ :str = sorted(set(lowercase__ ) ) lowerCAmelCase_ :Optional[Any] = len(lowercase__ ) lowerCAmelCase_ :Any = {b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )} lowerCAmelCase_ :List[str] = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: lowerCAmelCase_ :Dict = block_name_mapping[b] rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", 
"""encoder.top_bn.running_var""") ) lowerCAmelCase_ :str = {} for item in rename_keys: if item[0] in original_param_names: lowerCAmelCase_ :List[str] = """efficientnet.""" + item[1] lowerCAmelCase_ :Optional[int] = """classifier.weight""" lowerCAmelCase_ :Union[str, Any] = """classifier.bias""" return key_mapping def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue lowerCAmelCase_ :Dict = key_mapping[key] if "_conv" in key and "kernel" in key: lowerCAmelCase_ :Union[str, Any] = torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: lowerCAmelCase_ :List[str] = torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: lowerCAmelCase_ :List[Any] = torch.from_numpy(np.transpose(lowercase__ ) ) else: lowerCAmelCase_ :Any = torch.from_numpy(lowercase__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase__ ) @torch.no_grad() def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : Optional[int] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Dict = model_classes[model_name]( include_top=lowercase__ , weights="""imagenet""" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="""softmax""" , ) lowerCAmelCase_ :List[str] = original_model.trainable_variables lowerCAmelCase_ :str = original_model.non_trainable_variables lowerCAmelCase_ :Optional[Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowerCAmelCase_ :Dict = param.numpy() lowerCAmelCase_ :Union[str, Any] = list(tf_params.keys() ) # Load HuggingFace model lowerCAmelCase_ :Dict = get_efficientnet_config(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = EfficientNetForImageClassification(lowercase__ ).eval() lowerCAmelCase_ :int = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) lowerCAmelCase_ :Optional[int] = rename_keys(lowercase__ ) replace_params(lowercase__ , lowercase__ , lowercase__ ) # Initialize preprocessor and preprocess input image lowerCAmelCase_ :Dict = convert_image_processor(lowercase__ ) lowerCAmelCase_ :str = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): lowerCAmelCase_ :str = hf_model(**lowercase__ ) lowerCAmelCase_ :Tuple = outputs.logits.detach().numpy() # Original model inference lowerCAmelCase_ :Tuple = False lowerCAmelCase_ :Optional[int] = CONFIG_MAP[model_name]["""image_size"""] lowerCAmelCase_ :Optional[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) lowerCAmelCase_ :str = image.img_to_array(lowercase__ ) lowerCAmelCase_ :List[Any] = np.expand_dims(lowercase__ , axis=0 ) lowerCAmelCase_ :Optional[Any] = original_model.predict(lowercase__ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowercase__ ): os.mkdir(lowercase__ ) # Save converted model and image processor hf_model.save_pretrained(lowercase__ ) preprocessor.save_pretrained(lowercase__ ) if push_to_hub: # Push model and image processor to hub print(f"""Pushing converted {model_name} to the hub...""" ) lowerCAmelCase_ :List[Any] = f"""efficientnet-{model_name}""" preprocessor.push_to_hub(lowercase__ ) hf_model.push_to_hub(lowercase__ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') __UpperCAmelCase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
code_codestyle: 84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = "data2vec-text" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Dict = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :str = max_position_embeddings lowerCAmelCase_ :int = type_vocab_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :List[Any] = layer_norm_eps lowerCAmelCase_ :List[Any] = position_embedding_type lowerCAmelCase_ :List[Any] = use_cache lowerCAmelCase_ :List[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
style_context_codestyle: 84
label: 1
"""simple docstring""" from math import sqrt def _snake_case ( lowercase__ : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int: '''simple docstring''' lowerCAmelCase_ :List[Any] = 0 lowerCAmelCase_ :List[str] = 1 while count != nth and number < 3: number += 1 if is_prime(lowercase__ ): count += 1 while count != nth: number += 2 if is_prime(lowercase__ ): count += 1 return number if __name__ == "__main__": print(F"""{solution() = }""")
code_codestyle: 84
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
84
1
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
84
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , 
"""convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = 
argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def _snake_case ( lowercase__ : str , lowercase__ : float | Decimal , lowercase__ : float = 1_0**-1_0 ) -> float: '''simple docstring''' lowerCAmelCase_ :List[Any] = a while True: lowerCAmelCase_ :Any = Decimal(lowercase__ ) - ( Decimal(eval(lowercase__ ) ) / Decimal(eval(str(diff(lowercase__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(lowercase__ ) ) < precision: # noqa: S307 return float(lowercase__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""") # Find Square Root of 5 print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""") # Exponential Roots print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
84
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
1
"""simple docstring""" import baseaa def _snake_case ( lowercase__ : str ) -> bytes: '''simple docstring''' return baseaa.aaaencode(string.encode("""utf-8""" ) ) def _snake_case ( lowercase__ : bytes ) -> str: '''simple docstring''' return baseaa.aaadecode(lowercase__ ).decode("""utf-8""" ) if __name__ == "__main__": import doctest doctest.testmod()
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
1
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = 'Hello world! cécé herlolip' def _snake_case ( lowercase__ : str , lowercase__ : str , lowercase__ : bool ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = FairseqRobertaModel.from_pretrained(lowercase__ ) roberta.eval() # disable dropout lowerCAmelCase_ :List[str] = roberta.model.encoder.sentence_encoder lowerCAmelCase_ :int = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: lowerCAmelCase_ :Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" , lowercase__ ) lowerCAmelCase_ :Optional[Any] = XLMRobertaXLForSequenceClassification(lowercase__ ) if classification_head else XLMRobertaXLForMaskedLM(lowercase__ ) model.eval() # Now let's copy all the weights. # Embeddings lowerCAmelCase_ :Any = roberta_sent_encoder.embed_tokens.weight lowerCAmelCase_ :Optional[int] = roberta_sent_encoder.embed_positions.weight lowerCAmelCase_ :Optional[Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
lowerCAmelCase_ :Tuple = roberta_sent_encoder.layer_norm.weight lowerCAmelCase_ :List[str] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer lowerCAmelCase_ :BertLayer = model.roberta.encoder.layer[i] lowerCAmelCase_ :TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] lowerCAmelCase_ :RobertaAttention = layer.attention lowerCAmelCase_ :Tuple = roberta_layer.self_attn_layer_norm.weight lowerCAmelCase_ :Dict = roberta_layer.self_attn_layer_norm.bias # self attention lowerCAmelCase_ :BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) lowerCAmelCase_ :Union[str, Any] = roberta_layer.self_attn.q_proj.weight lowerCAmelCase_ :Union[str, Any] = roberta_layer.self_attn.q_proj.bias lowerCAmelCase_ :List[Any] = roberta_layer.self_attn.k_proj.weight lowerCAmelCase_ :Tuple = roberta_layer.self_attn.k_proj.bias lowerCAmelCase_ :Union[str, Any] = roberta_layer.self_attn.v_proj.weight lowerCAmelCase_ :Tuple = roberta_layer.self_attn.v_proj.bias # self-attention output lowerCAmelCase_ :BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape lowerCAmelCase_ :Optional[Any] = roberta_layer.self_attn.out_proj.weight lowerCAmelCase_ :Dict = roberta_layer.self_attn.out_proj.bias # this one is final layer norm lowerCAmelCase_ :int = roberta_layer.final_layer_norm.weight lowerCAmelCase_ :str = roberta_layer.final_layer_norm.bias # intermediate lowerCAmelCase_ :BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape lowerCAmelCase_ :Optional[Any] = roberta_layer.fca.weight lowerCAmelCase_ :Tuple = roberta_layer.fca.bias # output lowerCAmelCase_ :BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape lowerCAmelCase_ :int = roberta_layer.fca.weight lowerCAmelCase_ :Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: lowerCAmelCase_ :str = roberta.model.classification_heads["""mnli"""].dense.weight lowerCAmelCase_ :Union[str, Any] = roberta.model.classification_heads["""mnli"""].dense.bias lowerCAmelCase_ :Tuple = roberta.model.classification_heads["""mnli"""].out_proj.weight lowerCAmelCase_ :int = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head lowerCAmelCase_ :List[Any] = roberta.model.encoder.lm_head.dense.weight lowerCAmelCase_ :Optional[Any] = roberta.model.encoder.lm_head.dense.bias lowerCAmelCase_ :Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight lowerCAmelCase_ :List[Any] = roberta.model.encoder.lm_head.layer_norm.bias lowerCAmelCase_ :Tuple = roberta.model.encoder.lm_head.weight lowerCAmelCase_ :List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
lowerCAmelCase_ :torch.Tensor = roberta.encode(lowercase__ ).unsqueeze(0 ) # batch of size 1 lowerCAmelCase_ :Any = model(lowercase__ )[0] if classification_head: lowerCAmelCase_ :str = roberta.model.classification_heads["""mnli"""](roberta.extract_features(lowercase__ ) ) else: lowerCAmelCase_ :List[Any] = roberta.model(lowercase__ )[0] print(our_output.shape , their_output.shape ) lowerCAmelCase_ :Dict = torch.max(torch.abs(our_output - their_output ) ).item() print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 lowerCAmelCase_ :Union[str, Any] = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(lowercase__ ).mkdir(parents=lowercase__ , exist_ok=lowercase__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase__ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) __UpperCAmelCase = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
84
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
1
"""simple docstring""" import os def _snake_case ( lowercase__ : str = "input.txt" ) -> int: '''simple docstring''' with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as input_file: lowerCAmelCase_ :Union[str, Any] = [ [int(lowercase__ ) for element in line.split(""",""" )] for line in input_file.readlines() ] lowerCAmelCase_ :List[Any] = len(lowercase__ ) lowerCAmelCase_ :Dict = len(matrix[0] ) lowerCAmelCase_ :Optional[int] = [[-1 for _ in range(lowercase__ )] for _ in range(lowercase__ )] for i in range(lowercase__ ): lowerCAmelCase_ :List[str] = matrix[i][0] for j in range(1 , lowercase__ ): for i in range(lowercase__ ): lowerCAmelCase_ :List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , lowercase__ ): lowerCAmelCase_ :Dict = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowerCAmelCase_ :Any = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"""{solution() = }""")
84
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
1
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool: '''simple docstring''' lowerCAmelCase_ :int = get_failure_array(lowercase__ ) # 2) Step through text searching for pattern lowerCAmelCase_ , lowerCAmelCase_ :Any = 0, 0 # index into text, pattern while i < len(lowercase__ ): if pattern[j] == text[i]: if j == (len(lowercase__ ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: lowerCAmelCase_ :Union[str, Any] = failure[j - 1] continue i += 1 return False def _snake_case ( lowercase__ : str ) -> list[int]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = [0] lowerCAmelCase_ :Optional[int] = 0 lowerCAmelCase_ :Any = 1 while j < len(lowercase__ ): if pattern[i] == pattern[j]: i += 1 elif i > 0: lowerCAmelCase_ :Union[str, Any] = failure[i - 1] continue j += 1 failure.append(lowercase__ ) return failure if __name__ == "__main__": # Test 1) __UpperCAmelCase = 'abc1abc12' __UpperCAmelCase = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __UpperCAmelCase = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __UpperCAmelCase = 'ABABX' __UpperCAmelCase = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __UpperCAmelCase = 'AAAB' __UpperCAmelCase = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __UpperCAmelCase = 'abcdabcy' __UpperCAmelCase = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __UpperCAmelCase = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
84
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
1
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[int] = ["image_processor", "tokenizer"] UpperCAmelCase_ :Optional[int] = "Pix2StructImageProcessor" UpperCAmelCase_ :str = ("T5Tokenizer", "T5TokenizerFast") def __init__( self , __A , __A ) -> int: lowerCAmelCase_ :Union[str, Any] = False super().__init__(__A , __A ) def __call__( self , __A=None , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 2048 , __A = 0 , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ) -> BatchEncoding: if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None and not self.image_processor.is_vqa: lowerCAmelCase_ :Tuple = self.tokenizer lowerCAmelCase_ :Dict = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values lowerCAmelCase_ :List[str] = self.image_processor( __A , return_tensors=__A , max_patches=__A , **__A ) else: # add pixel_values and bbox lowerCAmelCase_ :int = self.image_processor( __A , return_tensors=__A , max_patches=__A , header_text=__A , **__A ) if text is not None and not self.image_processor.is_vqa: lowerCAmelCase_ :Union[str, Any] = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) if "attention_mask" in text_encoding: lowerCAmelCase_ :List[str] = text_encoding.pop("""attention_mask""" ) if "input_ids" in text_encoding: lowerCAmelCase_ :List[str] = text_encoding.pop("""input_ids""" ) else: lowerCAmelCase_ :Optional[Any] = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def __lowerCAmelCase ( self , *__A , **__A ) -> Any: return self.tokenizer.batch_decode(*__A , **__A ) def __lowerCAmelCase ( self , *__A , **__A ) -> Dict: return self.tokenizer.decode(*__A , **__A ) @property def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :List[str] = self.tokenizer.model_input_names lowerCAmelCase_ :Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
84
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
1
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
1
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx" def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :List[Any] = torch.manual_seed(__A ) lowerCAmelCase_ :Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] 
= OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Dict = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :List[str] = init_image.resize((128, 128) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( 
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :int = output.images lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , 
beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with 
tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
1
"""simple docstring""" __UpperCAmelCase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __UpperCAmelCase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _snake_case ( lowercase__ : dict[int, list[int]] , lowercase__ : int , lowercase__ : list[bool] ) -> list[int]: '''simple docstring''' lowerCAmelCase_ :List[str] = True lowerCAmelCase_ :Any = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(lowercase__ , lowercase__ , lowercase__ ) order.append(lowercase__ ) return order def _snake_case ( lowercase__ : dict[int, list[int]] , lowercase__ : int , lowercase__ : list[bool] ) -> list[int]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :Optional[Any] = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(lowercase__ , lowercase__ , lowercase__ ) return component def _snake_case ( lowercase__ : dict[int, list[int]] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :str = len(lowercase__ ) * [False] lowerCAmelCase_ :dict[int, list[int]] = {vert: [] for vert in range(len(lowercase__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(lowercase__ ) lowerCAmelCase_ :Any = [] for i, was_visited in enumerate(lowercase__ ): if not was_visited: order += topology_sort(lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ :Dict = [] lowerCAmelCase_ :Optional[Any] = len(lowercase__ ) * [False] for i in range(len(lowercase__ ) ): lowerCAmelCase_ :str = order[len(lowercase__ ) - i - 1] if not visited[vert]: lowerCAmelCase_ :List[Any] = find_components(lowercase__ , lowercase__ , lowercase__ ) components_list.append(lowercase__ ) return components_list
84
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
1
"""simple docstring""" def _snake_case ( lowercase__ : Dict ) -> List[Any]: '''simple docstring''' stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Optional[Any] ) -> int: '''simple docstring''' if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: lowerCAmelCase_ , lowerCAmelCase_ :int = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: lowerCAmelCase_ :Any = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": __UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip() __UpperCAmelCase = [int(item) for item in user_input.split(',')] print(stooge_sort(unsorted))
84
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
1
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Union[str, Any] = CTRLTokenizer UpperCAmelCase_ :Dict = False UpperCAmelCase_ :List[Any] = False def __lowerCAmelCase ( self ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[int] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] lowerCAmelCase_ :List[Any] = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] lowerCAmelCase_ :Optional[Any] = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , **__A ) -> Dict: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ :Dict = """adapt react readapt apt""" lowerCAmelCase_ :Union[str, Any] = """adapt react readapt apt""" return input_text, output_text def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :List[Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase_ :List[str] = """adapt react readapt apt""" lowerCAmelCase_ :Any = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() lowerCAmelCase_ :List[Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Tuple = tokens + [tokenizer.unk_token] lowerCAmelCase_ :Dict = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
84
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx" def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :List[Any] = torch.manual_seed(__A ) lowerCAmelCase_ :Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] 
= OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Dict = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :List[str] = init_image.resize((128, 128) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( 
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :int = output.images lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
1
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , ) assert hasattr(self , """env""" ) def __lowerCAmelCase ( self , __A ) -> Any: # configuration for running training on smdistributed Model Parallel lowerCAmelCase_ :Union[str, Any] = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase_ :Tuple = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , ) def __lowerCAmelCase ( self , __A ) -> List[Any]: TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def __lowerCAmelCase ( self , __A ) -> List[str]: # create estimator lowerCAmelCase_ :Any = self.create_estimator(__A ) # run training estimator.fit() # result dataframe lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase_ :Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= 
self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
84
1
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser __UpperCAmelCase = logging.getLogger(__name__) torch.set_grad_enabled(False) __UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' def _snake_case ( lowercase__ : str , lowercase__ : int=1_0_0 , lowercase__ : int=" " ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Tuple = text.split(lowercase__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowercase__ ) , lowercase__ )] def _snake_case ( lowercase__ : dict ) -> dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(lowercase__ ): titles.append(title if title is not None else """""" ) texts.append(lowercase__ ) return {"title": titles, "text": texts} def _snake_case ( lowercase__ : dict , lowercase__ : DPRContextEncoder , lowercase__ : DPRContextEncoderTokenizerFast ) -> dict: '''simple docstring''' lowerCAmelCase_ :Tuple = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=lowercase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] lowerCAmelCase_ :int = ctx_encoder(input_ids.to(device=lowercase__ ) , return_dict=lowercase__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _snake_case ( lowercase__ : "RagExampleArguments" , lowercase__ : "ProcessingArguments" , lowercase__ : "IndexHnswArguments" , ) -> Optional[Any]: '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowerCAmelCase_ :Tuple = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowerCAmelCase_ :str = dataset.map(lowercase__ , batched=lowercase__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowerCAmelCase_ :Optional[int] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowercase__ ) lowerCAmelCase_ :List[Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowerCAmelCase_ :str = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space lowerCAmelCase_ :str = dataset.map( partial(lowercase__ , ctx_encoder=lowercase__ , ctx_tokenizer=lowercase__ ) , batched=lowercase__ , 
batch_size=processing_args.batch_size , features=lowercase__ , ) # And finally save your dataset lowerCAmelCase_ :Dict = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(lowercase__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowerCAmelCase_ :Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=lowercase__ ) # And save the index lowerCAmelCase_ :Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(lowercase__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :str = field( default=str(Path(A__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) UpperCAmelCase_ :str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) UpperCAmelCase_ :str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) UpperCAmelCase_ :Optional[str] = field( default=str(Path(A__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :Optional[int] = field( default=A__ , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) UpperCAmelCase_ :int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) UpperCAmelCase_ :int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) __UpperCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: __UpperCAmelCase = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
84
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0 ) -> str: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError("""Invalid input""" ) lowerCAmelCase_ :List[str] = 1_0**n lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
84
1
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __UpperCAmelCase = 'tiny-wmt19-en-ru' # Build # borrowed from a test __UpperCAmelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __UpperCAmelCase = dict(zip(vocab, range(len(vocab)))) __UpperCAmelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase = Path(tmpdirname) __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) __UpperCAmelCase = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __UpperCAmelCase = FSMTConfig( langs=['ru', 'en'], src_vocab_size=10_00, tgt_vocab_size=10_00, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __UpperCAmelCase = FSMTForConditionalGeneration(config) print(F"""num of params {tiny_model.num_parameters()}""") # Test __UpperCAmelCase = tokenizer(['Making tiny model'], return_tensors='pt') __UpperCAmelCase = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
84
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = 'src/transformers' __UpperCAmelCase = 'docs/source/en/tasks' def _snake_case ( lowercase__ : str , lowercase__ : List[str] , lowercase__ : Any ) -> str: '''simple docstring''' with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCAmelCase_ :List[Any] = f.readlines() # Find the start prompt. lowerCAmelCase_ :Tuple = 0 while not lines[start_index].startswith(lowercase__ ): start_index += 1 start_index += 1 lowerCAmelCase_ :Dict = start_index while not lines[end_index].startswith(lowercase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase_ :List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase__ , set() ) lowerCAmelCase_ :Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def _snake_case ( lowercase__ : int , lowercase__ : str=False ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = _find_text_in_file( filename=os.path.join(lowercase__ , lowercase__ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) lowerCAmelCase_ :int = get_model_list_for_task(lowercase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase__ , lowercase__ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" """ to fix this.""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
84
1
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : List[Any] ) -> Optional[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def _snake_case ( lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any]=True ) -> Any: '''simple docstring''' model.train() lowerCAmelCase_ :Optional[int] = model(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = F.mse_loss(lowercase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase__ ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int=False ) -> List[Any]: '''simple docstring''' set_seed(4_2 ) lowerCAmelCase_ :Any = RegressionModel() lowerCAmelCase_ :str = deepcopy(lowercase__ ) lowerCAmelCase_ :Any = RegressionDataset(length=8_0 ) lowerCAmelCase_ :List[Any] = DataLoader(lowercase__ , batch_size=1_6 ) model.to(accelerator.device ) if sched: lowerCAmelCase_ :str = AdamW(params=model.parameters() , lr=1E-3 ) lowerCAmelCase_ :List[Any] = AdamW(params=ddp_model.parameters() , lr=1E-3 ) lowerCAmelCase_ :Optional[int] = LambdaLR(lowercase__ , lr_lambda=lambda lowercase__ : epoch**0.65 ) lowerCAmelCase_ :str = LambdaLR(lowercase__ , lr_lambda=lambda lowercase__ : epoch**0.65 ) # Make a copy of `model` if sched: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = accelerator.prepare(lowercase__ , lowercase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = get_training_setup(lowercase__ ) # Use a single batch lowerCAmelCase_ , lowerCAmelCase_ :Any = next(iter(lowercase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with 
accelerator.no_sync(lowercase__ ): step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: # Sync grads step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) lowerCAmelCase_ :Tuple = ddp_input[torch.randperm(len(lowercase__ ) )] def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = get_training_setup(lowercase__ ) # Use a single batch lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = next(iter(lowercase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ :int = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase__ ): step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: # Sync grads step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) lowerCAmelCase_ :List[Any] = ddp_input[torch.randperm(len(lowercase__ ) )] def _snake_case ( lowercase__ : List[Any]=False , lowercase__ : List[Any]=False ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Any = Accelerator( split_batches=lowercase__ , dispatch_batches=lowercase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_training_setup(lowercase__ ) for iteration, batch in enumerate(lowercase__ ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ :Dict = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase__ ): step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # DDP model 
and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) lowerCAmelCase_ :List[str] = ddp_input[torch.randperm(len(lowercase__ ) )] GradientState._reset_state() def _snake_case ( lowercase__ : List[Any]=False , lowercase__ : List[str]=False ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = Accelerator( split_batches=lowercase__ , dispatch_batches=lowercase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :int = get_training_setup(lowercase__ , lowercase__ ) for iteration, batch in enumerate(lowercase__ ): lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ :List[str] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase__ ): step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" lowerCAmelCase_ :str = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase__ )) if accelerator.num_processes > 1: check_model_parameters(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def _snake_case ( ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = Accelerator() lowerCAmelCase_ :Any = RegressionDataset(length=8_0 ) lowerCAmelCase_ :str = DataLoader(lowercase__ , batch_size=1_6 ) lowerCAmelCase_ :str = RegressionDataset(length=9_6 ) lowerCAmelCase_ :Tuple = DataLoader(lowercase__ , batch_size=1_6 ) lowerCAmelCase_ , lowerCAmelCase_ :int = accelerator.prepare(lowercase__ , lowercase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ ) if iteration < len(lowercase__ ) - 1: assert not 
accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ ) if batch_num < len(lowercase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = Accelerator() lowerCAmelCase_ :List[str] = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(lowercase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(lowercase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(lowercase__ , lowercase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase__ , lowercase__ ) def _snake_case ( lowercase__ : int ) -> List[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
84
"""simple docstring""" def _snake_case ( lowercase__ : list[int] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] if len(lowercase__ ) == 1: return [nums.copy()] for _ in range(len(lowercase__ ) ): lowerCAmelCase_ :Optional[Any] = nums.pop(0 ) lowerCAmelCase_ :str = permute(lowercase__ ) for perm in permutations: perm.append(lowercase__ ) result.extend(lowercase__ ) nums.append(lowercase__ ) return result def _snake_case ( lowercase__ : Tuple ) -> List[str]: '''simple docstring''' def backtrack(lowercase__ : str ): if start == len(lowercase__ ) - 1: output.append(nums[:] ) else: for i in range(lowercase__ , len(lowercase__ ) ): lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] backtrack(start + 1 ) lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] # backtrack lowerCAmelCase_ :int = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCAmelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
84
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
84
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
84
1
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np __UpperCAmelCase = re.compile(R'\b(a|an|the)\b', re.UNICODE) __UpperCAmelCase = None def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=lowercase__ , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=lowercase__ , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def _snake_case ( lowercase__ : int ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowerCAmelCase_ :Any = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def _snake_case ( lowercase__ : Optional[Any] ) -> Any: '''simple docstring''' def remove_articles(lowercase__ : int ): return ARTICLES_REGEX.sub(""" """ , lowercase__ ) def white_space_fix(lowercase__ : List[str] ): return " ".join(text.split() ) def remove_punc(lowercase__ : Union[str, Any] ): lowerCAmelCase_ :List[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowercase__ : List[str] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowercase__ ) ) ) ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' if not s: return [] return normalize_answer(lowercase__ ).split() def _snake_case ( lowercase__ : Any , lowercase__ : Any ) -> Optional[int]: '''simple docstring''' return int(normalize_answer(lowercase__ ) == normalize_answer(lowercase__ ) ) def _snake_case ( lowercase__ : List[str] , lowercase__ : int ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Any = get_tokens(lowercase__ ) lowerCAmelCase_ :str = get_tokens(lowercase__ ) lowerCAmelCase_ :Optional[Any] = collections.Counter(lowercase__ ) & collections.Counter(lowercase__ ) lowerCAmelCase_ :Optional[int] = sum(common.values() ) if len(lowercase__ ) == 0 or len(lowercase__ ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowerCAmelCase_ :Dict = 1.0 * num_same / len(lowercase__ ) lowerCAmelCase_ :str = 1.0 * num_same / len(lowercase__ ) lowerCAmelCase_ :List[Any] = (2 * precision * recall) / (precision + recall) return fa def _snake_case ( lowercase__ : Tuple , lowercase__ : str ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :int = {} lowerCAmelCase_ :Dict = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowerCAmelCase_ :Tuple = qa["""id"""] lowerCAmelCase_ :Union[str, Any] = [t for t in 
qa["""answers"""]["""text"""] if normalize_answer(lowercase__ )] if not gold_answers: # For unanswerable questions, only correct answer is empty string lowerCAmelCase_ :Any = [""""""] if qid not in preds: print(f"""Missing prediction for {qid}""" ) continue lowerCAmelCase_ :List[str] = preds[qid] # Take max over all gold answers lowerCAmelCase_ :Any = max(compute_exact(lowercase__ , lowercase__ ) for a in gold_answers ) lowerCAmelCase_ :List[str] = max(compute_fa(lowercase__ , lowercase__ ) for a in gold_answers ) return exact_scores, fa_scores def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : List[str] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Any = {} for qid, s in scores.items(): lowerCAmelCase_ :str = na_probs[qid] > na_prob_thresh if pred_na: lowerCAmelCase_ :List[str] = float(not qid_to_has_ans[qid] ) else: lowerCAmelCase_ :Optional[Any] = s return new_scores def _snake_case ( lowercase__ : List[str] , lowercase__ : Dict , lowercase__ : str=None ) -> Dict: '''simple docstring''' if not qid_list: lowerCAmelCase_ :Dict = len(lowercase__ ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores.values() ) / total), ("""f1""", 100.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: lowerCAmelCase_ :int = len(lowercase__ ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def _snake_case ( lowercase__ : Dict , lowercase__ : List[Any] , lowercase__ : List[str] ) -> Tuple: '''simple docstring''' for k in new_eval: lowerCAmelCase_ :int = new_eval[k] def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : str , lowercase__ : str ) -> Dict: '''simple docstring''' plt.step(lowercase__ , lowercase__ , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(lowercase__ , lowercase__ , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(lowercase__ ) plt.savefig(lowercase__ ) plt.clf() def _snake_case ( lowercase__ : int , lowercase__ : int , lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : Union[str, Any]=None , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = sorted(lowercase__ , key=lambda lowercase__ : na_probs[k] ) lowerCAmelCase_ :Tuple = 0.0 lowerCAmelCase_ :List[str] = 1.0 lowerCAmelCase_ :Optional[Any] = 0.0 lowerCAmelCase_ :List[str] = [1.0] lowerCAmelCase_ :Dict = [0.0] lowerCAmelCase_ :str = 0.0 for i, qid in enumerate(lowercase__ ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowerCAmelCase_ :List[Any] = true_pos / float(i + 1 ) lowerCAmelCase_ :List[Any] = true_pos / float(lowercase__ ) if i == len(lowercase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(lowercase__ ) recalls.append(lowercase__ ) if out_image: plot_pr_curve(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return {"ap": 100.0 * avg_prec} def _snake_case ( lowercase__ : str , lowercase__ : Tuple , lowercase__ : int , lowercase__ : Any , lowercase__ : str , lowercase__ : int ) -> Union[str, Any]: '''simple docstring''' if out_image_dir and not os.path.exists(lowercase__ ): os.makedirs(lowercase__ ) lowerCAmelCase_ :Dict = sum(1 for v in qid_to_has_ans.values() if v 
) if num_true_pos == 0: return lowerCAmelCase_ :Optional[int] = make_precision_recall_eval( lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) lowerCAmelCase_ :int = make_precision_recall_eval( lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) lowerCAmelCase_ :List[str] = {k: float(lowercase__ ) for k, v in qid_to_has_ans.items()} lowerCAmelCase_ :str = make_precision_recall_eval( lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , ) merge_eval(lowercase__ , lowercase__ , """pr_exact""" ) merge_eval(lowercase__ , lowercase__ , """pr_f1""" ) merge_eval(lowercase__ , lowercase__ , """pr_oracle""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : int ) -> List[Any]: '''simple docstring''' if not qid_list: return lowerCAmelCase_ :str = [na_probs[k] for k in qid_list] lowerCAmelCase_ :Tuple = np.ones_like(lowercase__ ) / float(len(lowercase__ ) ) plt.hist(lowercase__ , weights=lowercase__ , bins=2_0 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(f"""Histogram of no-answer probability: {name}""" ) plt.savefig(os.path.join(lowercase__ , f"""na_prob_hist_{name}.png""" ) ) plt.clf() def _snake_case ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :int = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowerCAmelCase_ :List[Any] = num_no_ans lowerCAmelCase_ :int = cur_score lowerCAmelCase_ :Optional[Any] = 0.0 lowerCAmelCase_ :Union[str, Any] = sorted(lowercase__ , key=lambda lowercase__ : na_probs[k] ) for i, qid in enumerate(lowercase__ ): if qid not in scores: continue if qid_to_has_ans[qid]: lowerCAmelCase_ :Optional[int] = scores[qid] else: if preds[qid]: lowerCAmelCase_ :Optional[int] = -1 else: lowerCAmelCase_ :Union[str, Any] = 0 cur_score += diff if cur_score > best_score: lowerCAmelCase_ :Dict = cur_score lowerCAmelCase_ :Tuple = na_probs[qid] return 100.0 * best_score / len(lowercase__ ), best_thresh def _snake_case ( lowercase__ : Dict , lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Union[str, Any] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ :Any = find_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = find_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[Any] = best_exact lowerCAmelCase_ :List[Any] = exact_thresh lowerCAmelCase_ :Tuple = best_fa lowerCAmelCase_ :str = fa_thresh def _snake_case ( ) -> Any: '''simple docstring''' with open(OPTS.data_file ) as f: lowerCAmelCase_ :Union[str, Any] = json.load(lowercase__ ) lowerCAmelCase_ :Optional[Any] = dataset_json["""data"""] with open(OPTS.pred_file ) as f: lowerCAmelCase_ :Dict = json.load(lowercase__ ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowerCAmelCase_ :Tuple = json.load(lowercase__ ) else: lowerCAmelCase_ :str = {k: 0.0 for k in preds} lowerCAmelCase_ :List[str] = 
make_qid_to_has_ans(lowercase__ ) # maps qid to True/False lowerCAmelCase_ :List[str] = [k for k, v in qid_to_has_ans.items() if v] lowerCAmelCase_ :int = [k for k, v in qid_to_has_ans.items() if not v] lowerCAmelCase_ , lowerCAmelCase_ :str = get_raw_scores(lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[Any] = apply_no_ans_threshold(lowercase__ , lowercase__ , lowercase__ , OPTS.na_prob_thresh ) lowerCAmelCase_ :Union[str, Any] = apply_no_ans_threshold(lowercase__ , lowercase__ , lowercase__ , OPTS.na_prob_thresh ) lowerCAmelCase_ :Any = make_eval_dict(lowercase__ , lowercase__ ) if has_ans_qids: lowerCAmelCase_ :Dict = make_eval_dict(lowercase__ , lowercase__ , qid_list=lowercase__ ) merge_eval(lowercase__ , lowercase__ , """HasAns""" ) if no_ans_qids: lowerCAmelCase_ :List[Any] = make_eval_dict(lowercase__ , lowercase__ , qid_list=lowercase__ ) merge_eval(lowercase__ , lowercase__ , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , OPTS.out_image_dir ) histogram_na_prob(lowercase__ , lowercase__ , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(lowercase__ , lowercase__ , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) else: print(json.dumps(lowercase__ , indent=2 ) ) if __name__ == "__main__": __UpperCAmelCase = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
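# Illustrative sketch (added; not part of the original script). The
# apply_no_ans_threshold() step only rescores questions whose no-answer
# probability exceeds the threshold. On hypothetical toy inputs:
#
#   scores   = {"q1": 1.0, "q2": 0.0}     # raw exact-match scores
#   na_probs = {"q1": 0.9, "q2": 0.1}     # model's no-answer probabilities
#   has_ans  = {"q1": False, "q2": True}
#   apply_no_ans_threshold(scores, na_probs, has_ans, 0.5)
#   # -> {"q1": 1.0, "q2": 0.0}: q1 crosses the threshold and is rescored as
#   #    a correct abstention, since it has no gold answer.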
84
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
84
1
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
84
1
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = ["audio_values", "audio_mask"] def __init__( self , __A=2048 , __A=1 , __A=[16, 16] , __A=128 , __A=4_4100 , __A=86 , __A=2048 , __A=0.0 , **__A , ) -> int: super().__init__( feature_size=__A , sampling_rate=__A , padding_value=__A , **__A , ) lowerCAmelCase_ :str = spectrogram_length lowerCAmelCase_ :Union[str, Any] = num_channels lowerCAmelCase_ :List[Any] = patch_size lowerCAmelCase_ :Dict = feature_size // self.patch_size[1] lowerCAmelCase_ :int = n_fft lowerCAmelCase_ :Optional[Any] = sampling_rate // hop_length_to_sampling_rate lowerCAmelCase_ :List[Any] = sampling_rate lowerCAmelCase_ :List[Any] = padding_value lowerCAmelCase_ :Dict = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__A , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__A , norm="""slaney""" , mel_scale="""slaney""" , ).T def __lowerCAmelCase ( self , __A ) -> np.ndarray: lowerCAmelCase_ :Dict = spectrogram( __A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=8_0.0 , ) lowerCAmelCase_ :Dict = log_spec[:, :-1] lowerCAmelCase_ :Union[str, Any] = log_spec - 2_0.0 lowerCAmelCase_ :Union[str, Any] = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , __A , __A = None , __A = True , __A = None , __A = False , __A = False , **__A , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCAmelCase_ :Tuple = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowerCAmelCase_ :Tuple = is_batched_numpy or ( isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase_ :Tuple = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__A , np.ndarray ): lowerCAmelCase_ :List[Any] = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase_ :Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase_ :Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCAmelCase_ :Optional[int] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __A ): lowerCAmelCase_ :Union[str, Any] = [np.asarray(__A , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCAmelCase_ :Dict = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCAmelCase_ :Dict = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCAmelCase_ :str = np.array(__A ).astype(np.floataa ) # convert into correct format for padding lowerCAmelCase_ :Optional[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCAmelCase_ :int = np.ones([len(__A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCAmelCase_ :List[Any] = padded_audio_features * self.padding_value for i in range(len(__A ) ): lowerCAmelCase_ :Optional[Any] = audio_features[i] lowerCAmelCase_ :str = feature # return as BatchFeature if return_attention_mask: lowerCAmelCase_ :Tuple = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCAmelCase_ :Optional[int] = {"""audio_values""": padded_audio_features} lowerCAmelCase_ :Union[str, Any] = BatchFeature(data=__A , tensor_type=__A ) return encoded_inputs
84
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: 
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / 
"""dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ 
:Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) 
return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) 
/ """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
1
"""simple docstring""" def _snake_case ( lowercase__ : int , lowercase__ : int ) -> int: '''simple docstring''' return 1 if input_a == input_a else 0 def _snake_case ( ) -> None: '''simple docstring''' assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = "data2vec-text" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Dict = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :str = max_position_embeddings lowerCAmelCase_ :int = type_vocab_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :List[Any] = layer_norm_eps lowerCAmelCase_ :List[Any] = position_embedding_type lowerCAmelCase_ :List[Any] = use_cache lowerCAmelCase_ :List[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
84
1
"""simple docstring""" def _snake_case ( lowercase__ : list[int] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] if len(lowercase__ ) == 1: return [nums.copy()] for _ in range(len(lowercase__ ) ): lowerCAmelCase_ :Optional[Any] = nums.pop(0 ) lowerCAmelCase_ :str = permute(lowercase__ ) for perm in permutations: perm.append(lowercase__ ) result.extend(lowercase__ ) nums.append(lowercase__ ) return result def _snake_case ( lowercase__ : Tuple ) -> List[str]: '''simple docstring''' def backtrack(lowercase__ : str ): if start == len(lowercase__ ) - 1: output.append(nums[:] ) else: for i in range(lowercase__ , len(lowercase__ ) ): lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] backtrack(start + 1 ) lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] # backtrack lowerCAmelCase_ :int = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCAmelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
84
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
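# Example invocation (added; the script filename and paths are placeholders,
# while the flags match the argparse definitions above):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file t5_config.json \
#       --pytorch_dump_path ./t5-converted \
#       --is_encoder_only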
84
1
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[int] = ["pixel_values"] def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = IMAGENET_DEFAULT_MEAN , __A = IMAGENET_DEFAULT_STD , **__A , ) -> None: super().__init__(**__A ) lowerCAmelCase_ :Optional[int] = size if size is not None else {"""shortest_edge""": 224} lowerCAmelCase_ :Optional[Any] = get_size_dict(__A , default_to_square=__A ) lowerCAmelCase_ :int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCAmelCase_ :Dict = get_size_dict(__A , param_name="""crop_size""" ) lowerCAmelCase_ :int = do_resize lowerCAmelCase_ :Optional[Any] = size lowerCAmelCase_ :str = resample lowerCAmelCase_ :str = do_center_crop lowerCAmelCase_ :List[Any] = crop_size lowerCAmelCase_ :Optional[Any] = do_rescale lowerCAmelCase_ :Optional[int] = rescale_factor lowerCAmelCase_ :Any = do_normalize lowerCAmelCase_ :List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCAmelCase_ :Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray: lowerCAmelCase_ :Any = get_size_dict(__A , default_to_square=__A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCAmelCase_ :Optional[Any] = int((256 / 224) * size["""shortest_edge"""] ) lowerCAmelCase_ :Tuple = get_resize_output_image_size(__A , size=__A , default_to_square=__A ) lowerCAmelCase_ :str = {"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( __A , size=(size_dict["""height"""], size_dict["""width"""]) , resample=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> np.ndarray: lowerCAmelCase_ :Any = get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> np.ndarray: return rescale(__A , scale=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray: return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> BatchFeature: lowerCAmelCase_ :Union[str, Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ :Union[str, Any] = resample if resample is not None else self.resample lowerCAmelCase_ :Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase_ :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ :str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ :str = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ :Any = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ :List[Any] = image_std if image_std is not None else self.image_std lowerCAmelCase_ :List[Any] = size if size is not None else self.size lowerCAmelCase_ :str = get_size_dict(__A , default_to_square=__A ) lowerCAmelCase_ :Optional[int] = crop_size if crop_size is not None else self.crop_size lowerCAmelCase_ :int = get_size_dict(__A , param_name="""crop_size""" ) lowerCAmelCase_ :Optional[Any] = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCAmelCase_ :Any = [to_numpy_array(__A ) for image in images] if do_resize: lowerCAmelCase_ :Tuple = [self.resize(__A , __A , __A ) for image in images] if do_center_crop: lowerCAmelCase_ :int = [self.center_crop(__A , __A ) for image in images] if do_rescale: lowerCAmelCase_ :Optional[int] = [self.rescale(__A , __A ) for image in images] if do_normalize: lowerCAmelCase_ :List[str] = [self.normalize(__A , __A , __A ) for image in images] lowerCAmelCase_ :Tuple = [to_channel_dimension_format(__A , __A ) for image in images] lowerCAmelCase_ :List[str] = {"""pixel_values""": images} return BatchFeature(data=__A , tensor_type=__A )
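# Usage sketch (added; this obfuscated processor mirrors transformers'
# LevitImageProcessor -- note the (256 / 224) resize scaling -- so the concrete
# class name below is an assumption):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import LevitImageProcessor
#   processor = LevitImageProcessor()
#   image = Image.fromarray((np.random.rand(256, 256, 3) * 255).astype("uint8"))
#   batch = processor(image, return_tensors="np")
#   print(batch["pixel_values"].shape)   # (1, 3, 224, 224) after resize + crop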
84
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , 
"""convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = 
argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
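# Usage sketch (illustrative): the flag names mirror the argparse definitions
# above; the checkpoint filename is hypothetical.
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti \
#       --push_to_hub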
84
1
"""simple docstring""" # This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def _snake_case ( lowercase__ : List[Any] , lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = multiprocessing.Manager() lowerCAmelCase_ :Union[str, Any] = manager.list() lowerCAmelCase_ :Any = multiprocessing.Process(target=lowercase__ , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def _snake_case ( lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple ) -> List[str]: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowerCAmelCase_ :Union[str, Any] = shutil.rmtree lowerCAmelCase_ :str = os.rmdir lowerCAmelCase_ :Any = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowerCAmelCase_ :Any = {} with swallow_io(): with time_limit(lowercase__ ): exec(lowercase__ , lowercase__ ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. lowerCAmelCase_ :Dict = rmtree lowerCAmelCase_ :List[str] = rmdir lowerCAmelCase_ :int = chdir @contextlib.contextmanager def _snake_case ( lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' def signal_handler(lowercase__ : List[Any] , lowercase__ : Dict ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , lowercase__ ) signal.signal(signal.SIGALRM , lowercase__ ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def _snake_case ( ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = WriteOnlyStringIO() with contextlib.redirect_stdout(lowercase__ ): with contextlib.redirect_stderr(lowercase__ ): with redirect_stdin(lowercase__ ): yield @contextlib.contextmanager def _snake_case ( ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(lowercase__ ): yield dirname class _SCREAMING_SNAKE_CASE ( A__ ): pass class _SCREAMING_SNAKE_CASE ( io.StringIO ): def __lowerCAmelCase ( self , *__A , **__A ) -> List[str]: raise OSError def __lowerCAmelCase ( self , *__A , **__A ) -> Optional[int]: raise OSError def __lowerCAmelCase ( self , *__A , **__A ) -> List[Any]: raise OSError def __lowerCAmelCase ( self , *__A , **__A ) -> Dict: return False class _SCREAMING_SNAKE_CASE ( contextlib._RedirectStream ): # type: ignore UpperCAmelCase_ :Union[str, Any] = "stdin" @contextlib.contextmanager def _snake_case ( lowercase__ : Dict ) -> Dict: '''simple docstring''' if root == ".": yield return lowerCAmelCase_ :List[Any] = os.getcwd() os.chdir(lowercase__ ) try: yield except BaseException as exc: raise exc finally: os.chdir(lowercase__ ) def _snake_case ( lowercase__ : List[str]=None ) -> List[str]: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) 
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins lowerCAmelCase_ :Any = None lowerCAmelCase_ :Optional[int] = None import os lowerCAmelCase_ :List[str] = """1""" lowerCAmelCase_ :Tuple = None lowerCAmelCase_ :Union[str, Any] = None lowerCAmelCase_ :Dict = None lowerCAmelCase_ :Dict = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :Dict = None lowerCAmelCase_ :Union[str, Any] = None lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ :Optional[Any] = None lowerCAmelCase_ :Dict = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :str = None lowerCAmelCase_ :Optional[Any] = None lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :int = None lowerCAmelCase_ :List[str] = None lowerCAmelCase_ :Union[str, Any] = None lowerCAmelCase_ :List[str] = None lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :Optional[Any] = None lowerCAmelCase_ :Dict = None lowerCAmelCase_ :List[str] = None lowerCAmelCase_ :Optional[Any] = None lowerCAmelCase_ :List[str] = None lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Dict = None import shutil lowerCAmelCase_ :Union[str, Any] = None lowerCAmelCase_ :int = None lowerCAmelCase_ :int = None import subprocess lowerCAmelCase_ :Dict = None # type: ignore lowerCAmelCase_ :Dict = None import sys lowerCAmelCase_ :Tuple = None lowerCAmelCase_ :int = None lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Union[str, Any] = None lowerCAmelCase_ :Optional[Any] = None
84
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
1
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Tuple = PegasusTokenizer UpperCAmelCase_ :List[Any] = PegasusTokenizerFast UpperCAmelCase_ :Optional[int] = True UpperCAmelCase_ :Union[str, Any] = True def __lowerCAmelCase ( self ) -> Tuple: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ :Optional[int] = PegasusTokenizer(__A ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self ) -> int: return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def __lowerCAmelCase ( self , **__A ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> List[Any]: return ("This is a test", "This is a test") def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Any = """</s>""" lowerCAmelCase_ :Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(__A ) , 1103 ) def __lowerCAmelCase ( self ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase_ :List[str] = rust_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0] lowerCAmelCase_ :List[Any] = py_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0] self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Dict = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase_ :int = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase_ :Any = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase_ :Dict = tokenizer([raw_input_str] , return_tensors=__A ).input_ids[0] self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :int = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase_ :Dict = """To ensure a smooth flow of bank resolutions.""" lowerCAmelCase_ :List[str] = [413, 615, 114, 
2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase_ :Union[str, Any] = tokenizer([raw_input_str] , return_tensors=__A ).input_ids[0] self.assertListEqual(__A , __A ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = ["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase_ :int = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase_ :str = self._large_tokenizer(__A , padding=__A , truncation=__A , return_tensors="""pt""" ) lowerCAmelCase_ :Dict = self._large_tokenizer( text_target=__A , max_length=5 , padding=__A , truncation=__A , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__A ) == 2 # input_ids, attention_mask. @slow def __lowerCAmelCase ( self ) -> List[Any]: # fmt: off lowerCAmelCase_ :Dict = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Optional[Any] = PegasusTokenizer UpperCAmelCase_ :Any = PegasusTokenizerFast UpperCAmelCase_ :Optional[Any] = True UpperCAmelCase_ :List[Any] = True def __lowerCAmelCase ( self ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ :Any = 
PegasusTokenizer(__A , offset=0 , mask_token_sent=__A , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self ) -> int: return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def __lowerCAmelCase ( self , **__A ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> Tuple: return ("This is a test", "This is a test") def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ :int = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase_ :int = rust_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0] lowerCAmelCase_ :List[Any] = py_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0] self.assertListEqual(__A , __A ) @require_torch def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase_ :List[str] = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase_ :Any = self._large_tokenizer(__A , padding=__A , truncation=__A , return_tensors="""pt""" ) lowerCAmelCase_ :Any = self._large_tokenizer( text_target=__A , max_length=5 , padding=__A , truncation=__A , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__A ) == 2 # input_ids, attention_mask. def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Dict = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase_ :Any = self._large_tokenizer(__A ).input_ids self.assertListEqual( __A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
1
"""simple docstring""" import logging import os from .state import PartialState class _SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ): @staticmethod def __lowerCAmelCase ( __A ) -> Optional[Any]: lowerCAmelCase_ :List[str] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCAmelCase ( self , __A , __A , *__A , **__A ) -> Any: if PartialState._shared_state == {}: raise RuntimeError( """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" ) lowerCAmelCase_ :Optional[Any] = kwargs.pop("""main_process_only""" , __A ) lowerCAmelCase_ :List[str] = kwargs.pop("""in_order""" , __A ) if self.isEnabledFor(__A ): if self._should_log(__A ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = self.process(__A , __A ) self.logger.log(__A , __A , *__A , **__A ) elif in_order: lowerCAmelCase_ :List[str] = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowerCAmelCase_ , lowerCAmelCase_ :Dict = self.process(__A , __A ) self.logger.log(__A , __A , *__A , **__A ) state.wait_for_everyone() def _snake_case ( lowercase__ : str , lowercase__ : str = None ) -> List[Any]: '''simple docstring''' if log_level is None: lowerCAmelCase_ :int = os.environ.get("""ACCELERATE_LOG_LEVEL""" , lowercase__ ) lowerCAmelCase_ :List[str] = logging.getLogger(lowercase__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(lowercase__ , {} )
84
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __UpperCAmelCase = None __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} __UpperCAmelCase = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } __UpperCAmelCase = { 'google/rembert': 2_56, } __UpperCAmelCase = '▁' class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = VOCAB_FILES_NAMES UpperCAmelCase_ :Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ :Union[str, Any] = RemBertTokenizer def __init__( self , __A=None , __A=None , __A=True , __A=True , __A=False , __A="[CLS]" , __A="[SEP]" , __A="<unk>" , __A="[SEP]" , __A="<pad>" , __A="[CLS]" , __A="[MASK]" , **__A , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ :str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( __A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , ) lowerCAmelCase_ :Optional[int] = do_lower_case lowerCAmelCase_ :Union[str, Any] = remove_space lowerCAmelCase_ :List[str] = keep_accents lowerCAmelCase_ :List[Any] = vocab_file lowerCAmelCase_ :Optional[int] = False if not self.vocab_file else True def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]: lowerCAmelCase_ :Dict = [self.sep_token_id] lowerCAmelCase_ :Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1] def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]: lowerCAmelCase_ :Any = [self.sep_token_id] lowerCAmelCase_ :List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error("""Vocabulary path ({}) should be a directory""".format(__A ) ) return lowerCAmelCase_ :Union[str, Any] = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
84
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
1
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Optional[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def __lowerCAmelCase ( self , __A=0 ) -> Optional[Any]: lowerCAmelCase_ :Dict = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :Tuple = np.random.RandomState(__A ) lowerCAmelCase_ :List[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.7_5, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :int = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) # warmup pass to apply optimizations lowerCAmelCase_ :List[Any] = pipe(**self.get_dummy_inputs() ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Tuple = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :Any = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) 
lowerCAmelCase_ :Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Dict = self.get_dummy_inputs() lowerCAmelCase_ :Optional[int] = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :Tuple = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Any = pipe(**__A ).images lowerCAmelCase_ :List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :List[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[Any] = pipe(**__A ).images lowerCAmelCase_ :int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :int = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> Tuple: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :List[Any] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Tuple = init_image.resize((768, 512) ) # using the PNDM scheduler by default lowerCAmelCase_ :Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[int] = np.random.RandomState(0 ) lowerCAmelCase_ :Tuple = pipe( prompt=__A , image=__A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Union[str, Any] = output.images lowerCAmelCase_ :int = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) lowerCAmelCase_ :Union[str, Any] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 
0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Any = init_image.resize((768, 512) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) lowerCAmelCase_ :List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = np.random.RandomState(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :List[Any] = output.images lowerCAmelCase_ :Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) lowerCAmelCase_ :int = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart __UpperCAmelCase = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } __UpperCAmelCase = { 'facebook/bart-base': 10_24, 'facebook/bart-large': 10_24, 'facebook/bart-large-mnli': 10_24, 'facebook/bart-large-cnn': 10_24, 'facebook/bart-large-xsum': 10_24, 'yjernite/bart_eli5': 10_24, } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[str] = VOCAB_FILES_NAMES UpperCAmelCase_ :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ :Optional[Any] = ["input_ids", "attention_mask"] UpperCAmelCase_ :int = BartTokenizer def __init__( self , __A=None , __A=None , __A=None , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , __A=True , **__A , ) -> Any: super().__init__( __A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , ) lowerCAmelCase_ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space: lowerCAmelCase_ 
:Optional[Any] = getattr(__A , pre_tok_state.pop("""type""" ) ) lowerCAmelCase_ :Optional[int] = add_prefix_space lowerCAmelCase_ :Any = pre_tok_class(**__A ) lowerCAmelCase_ :int = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowerCAmelCase_ :Dict = """post_processor""" lowerCAmelCase_ :Optional[Any] = getattr(self.backend_tokenizer , __A , __A ) if tokenizer_component_instance: lowerCAmelCase_ :List[str] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase_ :str = tuple(state["""sep"""] ) if "cls" in state: lowerCAmelCase_ :Any = tuple(state["""cls"""] ) lowerCAmelCase_ :Dict = False if state.get("""add_prefix_space""" , __A ) != add_prefix_space: lowerCAmelCase_ :int = add_prefix_space lowerCAmelCase_ :Dict = True if state.get("""trim_offsets""" , __A ) != trim_offsets: lowerCAmelCase_ :Tuple = trim_offsets lowerCAmelCase_ :str = True if changes_to_apply: lowerCAmelCase_ :Tuple = getattr(__A , state.pop("""type""" ) ) lowerCAmelCase_ :int = component_class(**__A ) setattr(self.backend_tokenizer , __A , __A ) @property def __lowerCAmelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ :List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value lowerCAmelCase_ :Optional[int] = value def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding: lowerCAmelCase_ :Optional[int] = kwargs.get("""is_split_into_words""" , __A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__A , **__A ) def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding: lowerCAmelCase_ :Optional[Any] = kwargs.get("""is_split_into_words""" , __A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__A , **__A ) def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]: lowerCAmelCase_ :Tuple = self._tokenizer.model.save(__A , name=__A ) return tuple(__A ) def __lowerCAmelCase ( self , __A , __A=None ) -> Tuple: lowerCAmelCase_ :Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]: lowerCAmelCase_ :Any = [self.sep_token_id] lowerCAmelCase_ :Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
84
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
1
"""simple docstring""" import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline __UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : tuple , lowercase__ : Path , lowercase__ : int , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Tuple=False , ) -> Tuple: '''simple docstring''' output_path.parent.mkdir(parents=lowercase__ , exist_ok=lowercase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , use_external_data_format=lowercase__ , enable_onnx_checker=lowercase__ , opset_version=lowercase__ , ) else: export( lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , opset_version=lowercase__ , ) @torch.no_grad() def _snake_case ( lowercase__ : str , lowercase__ : str , lowercase__ : int , lowercase__ : bool = False ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowerCAmelCase_ :List[str] = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: lowerCAmelCase_ :int = """cpu""" lowerCAmelCase_ :Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowercase__ , torch_dtype=lowercase__ ).to(lowercase__ ) lowerCAmelCase_ :Dict = Path(lowercase__ ) # TEXT ENCODER lowerCAmelCase_ :Any = pipeline.text_encoder.config.max_position_embeddings lowerCAmelCase_ :Tuple = pipeline.text_encoder.config.hidden_size lowerCAmelCase_ :Optional[Any] = pipeline.tokenizer( """A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""pt""" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowercase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={ """input_ids""": {0: """batch""", 1: """sequence"""}, } , opset=lowercase__ , ) del pipeline.text_encoder # UNET lowerCAmelCase_ :Dict = pipeline.unet.config.in_channels lowerCAmelCase_ :List[str] = pipeline.unet.config.sample_size lowerCAmelCase_ :str = output_path / """unet""" / """model.onnx""" onnx_export( pipeline.unet , model_args=( torch.randn(2 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(2 ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(2 , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=lowercase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={ """sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, 
"""timestep""": {0: """batch"""}, """encoder_hidden_states""": {0: """batch""", 1: """sequence"""}, } , opset=lowercase__ , use_external_data_format=lowercase__ , ) lowerCAmelCase_ :Any = str(unet_path.absolute().as_posix() ) lowerCAmelCase_ :Dict = os.path.dirname(lowercase__ ) lowerCAmelCase_ :Optional[Any] = onnx.load(lowercase__ ) # clean up existing tensor files shutil.rmtree(lowercase__ ) os.mkdir(lowercase__ ) # collate external tensor files into one onnx.save_model( lowercase__ , lowercase__ , save_as_external_data=lowercase__ , all_tensors_to_one_file=lowercase__ , location="""weights.pb""" , convert_attribute=lowercase__ , ) del pipeline.unet # VAE ENCODER lowerCAmelCase_ :str = pipeline.vae lowerCAmelCase_ :Dict = vae_encoder.config.in_channels lowerCAmelCase_ :Dict = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder lowerCAmelCase_ :Optional[int] = lambda lowercase__ , lowercase__ : vae_encoder.encode(lowercase__ , lowercase__ )[0].sample() onnx_export( lowercase__ , model_args=( torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={ """sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowercase__ , ) # VAE DECODER lowerCAmelCase_ :Dict = pipeline.vae lowerCAmelCase_ :Optional[int] = vae_decoder.config.latent_channels lowerCAmelCase_ :List[str] = vae_decoder.config.out_channels # forward only through the decoder part lowerCAmelCase_ :List[str] = vae_encoder.decode onnx_export( lowercase__ , model_args=( torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowercase__ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowerCAmelCase_ :Dict = pipeline.safety_checker lowerCAmelCase_ :Dict = safety_checker.config.vision_config.num_channels lowerCAmelCase_ :Any = safety_checker.config.vision_config.image_size lowerCAmelCase_ :Any = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , lowercase__ , lowercase__ , lowercase__ , ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), ) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={ """clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, """images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""}, } , opset=lowercase__ , ) del pipeline.safety_checker lowerCAmelCase_ :List[Any] = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" ) lowerCAmelCase_ :Tuple = pipeline.feature_extractor else: lowerCAmelCase_ :str = None lowerCAmelCase_ :str = None lowerCAmelCase_ :Optional[int] = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , 
vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=lowercase__ , feature_extractor=lowercase__ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(lowercase__ ) print("""ONNX pipeline saved to""" , lowercase__ ) del pipeline del onnx_pipeline lowerCAmelCase_ :List[str] = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ , provider="""CPUExecutionProvider""" ) print("""ONNX pipeline is loadable""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=14, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') __UpperCAmelCase = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
84
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
1
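The conversion script above repeatedly calls an `onnx_export` helper that the excerpt never defines. A minimal sketch of what such a helper might look like, assuming it simply wraps `torch.onnx.export` (parameter names follow the call sites; the defaults are guesses, not the script's actual implementation):

import torch

def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    # Sketch only; `use_external_data_format` is accepted to match the call sites,
    # but its handling differs across torch versions, so it is ignored here.
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with torch.no_grad():
        torch.onnx.export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )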
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Any = "Salesforce/blip-image-captioning-base" UpperCAmelCase_ :str = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) UpperCAmelCase_ :List[Any] = "image_captioner" UpperCAmelCase_ :Tuple = AutoModelForVisionaSeq UpperCAmelCase_ :List[Any] = ["image"] UpperCAmelCase_ :Union[str, Any] = ["text"] def __init__( self , *__A , **__A ) -> str: requires_backends(self , ["""vision"""] ) super().__init__(*__A , **__A ) def __lowerCAmelCase ( self , __A ) -> List[Any]: return self.pre_processor(images=__A , return_tensors="""pt""" ) def __lowerCAmelCase ( self , __A ) -> str: return self.model.generate(**__A ) def __lowerCAmelCase ( self , __A ) -> Tuple: return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0].strip()
84
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , 
beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with 
tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
1
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def _snake_case ( lowercase__ : int ) -> int: '''simple docstring''' lowerCAmelCase_ :str = prime_factors(lowercase__ ) if is_square_free(lowercase__ ): return -1 if len(lowercase__ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
84
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
1
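For reference, the Möbius function above returns 0 for numbers with a squared prime factor and (-1)^k otherwise, where k is the number of distinct prime factors. A self-contained sketch, independent of the project-local `prime_factors`/`is_square_free` helpers, checked against the first few values:

def mobius(n: int) -> int:
    # Collect prime factors by trial division.
    factors = []
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    if len(factors) != len(set(factors)):  # a squared prime factor => mu(n) = 0
        return 0
    return -1 if len(factors) % 2 else 1

assert [mobius(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]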
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
1
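The generator above implements an incremental sieve: each composite is crossed off lazily by storing one prime factor per upcoming composite and advancing it past collisions. A stand-alone rewrite of the same idea, with a quick check of the first primes (an illustration, not the exact upstream code):

def sieve_gen():
    factor_map = {}  # upcoming composite -> one of its prime factors
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # prime is composite: re-schedule its stored factor at the next free slot
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1

gen = sieve_gen()
assert [next(gen) for _ in range(8)] == [2, 3, 5, 7, 11, 13, 17, 19]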
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx" def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :List[Any] = torch.manual_seed(__A ) lowerCAmelCase_ :Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] 
= OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Dict = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :List[str] = init_image.resize((128, 128) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( 
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :int = output.images lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
1
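The `_import_structure`/`_LazyModule` pattern above defers heavy submodule imports until an attribute is first accessed. A minimal stand-alone sketch of the same mechanism using a PEP 562 module-level `__getattr__` (an illustration only, not transformers' actual `_LazyModule` class; the `json` mapping is a placeholder):

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # module -> exported names

def __getattr__(name):  # only takes effect when this lives in a module, e.g. an __init__.py
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")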
"""simple docstring""" import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :List[str] = None @experimental def _snake_case ( lowercase__ : Dict , lowercase__ : int , lowercase__ : Any , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return _map_with_joblib(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : Any , lowercase__ : str , lowercase__ : Tuple ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = num_proc if num_proc <= len(lowercase__ ) else len(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = [] # We organize the splits ourselve (contiguous splits) for index in range(lowercase__ ): lowerCAmelCase_ :List[str] = len(lowercase__ ) // num_proc lowerCAmelCase_ :List[Any] = len(lowercase__ ) % num_proc lowerCAmelCase_ :str = div * index + min(lowercase__ , lowercase__ ) lowerCAmelCase_ :List[str] = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(lowercase__ ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( f"""Error dividing inputs iterable among processes. """ f"""Total number of objects {len(lowercase__ )}, """ f"""length: {sum(len(i[1] ) for i in split_kwds )}""" ) logger.info( f"""Spawning {num_proc} processes for {len(lowercase__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" ) lowerCAmelCase_ , lowerCAmelCase_ :int = None, None if not disable_tqdm: lowerCAmelCase_ , lowerCAmelCase_ :Dict = (RLock(),), tqdm.set_lock with Pool(lowercase__ , initargs=lowercase__ , initializer=lowercase__ ) as pool: lowerCAmelCase_ :Tuple = pool.map(lowercase__ , lowercase__ ) logger.info(f"""Finished {num_proc} processes""" ) lowerCAmelCase_ :str = [obj for proc_res in mapped for obj in proc_res] logger.info(f"""Unpacked {len(lowercase__ )} objects""" ) return mapped def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] , lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Any: '''simple docstring''' import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowercase__ ): return joblib.Parallel()( joblib.delayed(lowercase__ )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def _snake_case ( lowercase__ : str ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: lowerCAmelCase_ :Optional[Any] = None
84
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , ) assert hasattr(self , """env""" ) def __lowerCAmelCase ( self , __A ) -> Any: # configuration for running training on smdistributed Model Parallel lowerCAmelCase_ :Union[str, Any] = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase_ :Tuple = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , ) def __lowerCAmelCase ( self , __A ) -> List[Any]: TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def __lowerCAmelCase ( self , __A ) -> List[str]: # create estimator lowerCAmelCase_ :Any = self.create_estimator(__A ) # run training estimator.fit() # result dataframe lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase_ :Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= 
self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
84
1
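The contiguous-split arithmetic in `_map_with_multiprocessing_pool` above can be verified in isolation: each of the `num_proc` workers receives `len(iterable) // num_proc` items, and the first `len(iterable) % num_proc` workers receive one extra. A small sketch:

def contiguous_splits(iterable, num_proc):
    div, mod = divmod(len(iterable), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(iterable[start:end])
    return splits

# 10 items over 3 workers: sizes 4, 3, 3, covering every item exactly once.
assert contiguous_splits(list(range(10)), 3) == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]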
"""simple docstring""" def _snake_case ( lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> Any: '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowerCAmelCase_ :Union[str, Any] = mf_knapsack(i - 1 , lowercase__ , lowercase__ , lowercase__ ) else: lowerCAmelCase_ :str = max( mf_knapsack(i - 1 , lowercase__ , lowercase__ , lowercase__ ) , mf_knapsack(i - 1 , lowercase__ , lowercase__ , j - wt[i - 1] ) + val[i - 1] , ) lowerCAmelCase_ :Union[str, Any] = val return f[i][j] def _snake_case ( lowercase__ : str , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :Any = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowerCAmelCase_ :List[str] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowerCAmelCase_ :Optional[Any] = dp[i - 1][w_] return dp[n][w_], dp def _snake_case ( lowercase__ : int , lowercase__ : list , lowercase__ : list ) -> Any: '''simple docstring''' if not (isinstance(lowercase__ , (list, tuple) ) and isinstance(lowercase__ , (list, tuple) )): raise ValueError( """Both the weights and values vectors must be either lists or tuples""" ) lowerCAmelCase_ :Dict = len(lowercase__ ) if num_items != len(lowercase__ ): lowerCAmelCase_ :Union[str, Any] = ( """The number of weights must be the same as the number of values.\n""" f"""But got {num_items} weights and {len(lowercase__ )} values""" ) raise ValueError(lowercase__ ) for i in range(lowercase__ ): if not isinstance(wt[i] , lowercase__ ): lowerCAmelCase_ :Tuple = ( """All weights must be integers but got weight of """ f"""type {type(wt[i] )} at index {i}""" ) raise TypeError(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ :set = set() _construct_solution(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return optimal_val, example_optional_set def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : set ) -> List[Any]: '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(lowercase__ , lowercase__ , i - 1 , lowercase__ , lowercase__ ) else: optimal_set.add(lowercase__ ) _construct_solution(lowercase__ , lowercase__ , i - 1 , j - wt[i - 1] , lowercase__ ) if __name__ == "__main__": __UpperCAmelCase = [3, 2, 4, 4] __UpperCAmelCase = [4, 3, 2, 3] __UpperCAmelCase = 4 __UpperCAmelCase = 6 __UpperCAmelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __UpperCAmelCase , __UpperCAmelCase = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __UpperCAmelCase , __UpperCAmelCase = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
84
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0 ) -> str: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError("""Invalid input""" ) lowerCAmelCase_ :List[str] = 1_0**n lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
84
1
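As a quick cross-check of the knapsack example above (capacity 6, weights [4, 3, 2, 3], values [3, 2, 4, 4]): items 3 and 4 weigh 2 + 3 = 5 and are worth 4 + 4 = 8, matching the asserted optimum. The same answer falls out of the standard one-dimensional 0/1 knapsack recurrence:

def knapsack_value(capacity, weights, values):
    dp = [0] * (capacity + 1)
    for wt_i, val_i in zip(weights, values):
        for cap in range(capacity, wt_i - 1, -1):  # iterate downwards so each item is used at most once
            dp[cap] = max(dp[cap], dp[cap - wt_i] + val_i)
    return dp[capacity]

assert knapsack_value(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8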
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = 'src/transformers' __UpperCAmelCase = 'docs/source/en/tasks' def _snake_case ( lowercase__ : str , lowercase__ : List[str] , lowercase__ : Any ) -> str: '''simple docstring''' with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCAmelCase_ :List[Any] = f.readlines() # Find the start prompt. lowerCAmelCase_ :Tuple = 0 while not lines[start_index].startswith(lowercase__ ): start_index += 1 start_index += 1 lowerCAmelCase_ :Dict = start_index while not lines[end_index].startswith(lowercase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase_ :List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase__ , set() ) lowerCAmelCase_ :Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def _snake_case ( lowercase__ : int , lowercase__ : str=False ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = _find_text_in_file( filename=os.path.join(lowercase__ , lowercase__ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) lowerCAmelCase_ :int = get_model_list_for_task(lowercase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase__ , lowercase__ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" """ to fix this.""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
84
1
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def _snake_case ( lowercase__ : dict , lowercase__ : str , lowercase__ : set , lowercase__ : set , lowercase__ : dict , lowercase__ : dict , lowercase__ : PriorityQueue , lowercase__ : dict , lowercase__ : float | int , ) -> float | int: '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCAmelCase_ :List[str] = cst_fwd.get(lowercase__ , np.inf ) lowerCAmelCase_ :Tuple = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCAmelCase_ :List[Any] = new_cost_f lowerCAmelCase_ :Tuple = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCAmelCase_ :List[Any] = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def _snake_case ( lowercase__ : str , lowercase__ : str , lowercase__ : dict , lowercase__ : dict ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = -1 lowerCAmelCase_ :List[str] = set() lowerCAmelCase_ :Optional[int] = set() lowerCAmelCase_ :Union[str, Any] = {source: 0} lowerCAmelCase_ :List[Any] = {destination: 0} lowerCAmelCase_ :List[str] = {source: None} lowerCAmelCase_ :Optional[Any] = {destination: None} lowerCAmelCase_ :PriorityQueue[Any] = PriorityQueue() lowerCAmelCase_ :PriorityQueue[Any] = PriorityQueue() lowerCAmelCase_ :str = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = queue_forward.get() visited_forward.add(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = queue_backward.get() visited_backward.add(lowercase__ ) lowerCAmelCase_ :Any = pass_and_relaxation( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) lowerCAmelCase_ :str = pass_and_relaxation( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCAmelCase_ :Union[str, Any] = shortest_distance return shortest_path_distance __UpperCAmelCase = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } __UpperCAmelCase = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
84
"""simple docstring""" def _snake_case ( lowercase__ : list[int] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] if len(lowercase__ ) == 1: return [nums.copy()] for _ in range(len(lowercase__ ) ): lowerCAmelCase_ :Optional[Any] = nums.pop(0 ) lowerCAmelCase_ :str = permute(lowercase__ ) for perm in permutations: perm.append(lowercase__ ) result.extend(lowercase__ ) nums.append(lowercase__ ) return result def _snake_case ( lowercase__ : Tuple ) -> List[str]: '''simple docstring''' def backtrack(lowercase__ : str ): if start == len(lowercase__ ) - 1: output.append(nums[:] ) else: for i in range(lowercase__ , len(lowercase__ ) ): lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] backtrack(start + 1 ) lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] # backtrack lowerCAmelCase_ :int = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCAmelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
84
1
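A sanity check for the bidirectional search above: on the forward graph, the shortest B -> F path is B -> C -> D -> F with total weight 3. A plain one-directional Dijkstra over the same adjacency structure confirms this (sketch for verification only):

import heapq

def dijkstra(graph, source, destination):
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == destination:
            return d
        if d > dist.get(v, float("inf")):
            continue  # stale heap entry
        for nxt, w in graph.get(v, []):
            nd = d + w
            if nd < dist.get(nxt, float("inf")):
                dist[nxt] = nd
                heapq.heappush(heap, (nd, nxt))
    return -1

graph_fwd = {"B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]]}
assert dijkstra(graph_fwd, "B", "F") == 3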
"""simple docstring""" from math import isclose, sqrt def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> tuple[float, float, float]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = point_y / 4 / point_x lowerCAmelCase_ :Dict = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) lowerCAmelCase_ :Union[str, Any] = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) lowerCAmelCase_ :str = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 lowerCAmelCase_ :Tuple = outgoing_gradient**2 + 4 lowerCAmelCase_ :Tuple = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) lowerCAmelCase_ :str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0 lowerCAmelCase_ :Optional[Any] = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) lowerCAmelCase_ :Optional[int] = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point lowerCAmelCase_ :List[Any] = x_minus if isclose(lowercase__ , lowercase__ ) else x_plus lowerCAmelCase_ :List[str] = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def _snake_case ( lowercase__ : float = 1.4 , lowercase__ : float = -9.6 ) -> int: '''simple docstring''' lowerCAmelCase_ :int = 0 lowerCAmelCase_ :float = first_x_coord lowerCAmelCase_ :float = first_y_coord lowerCAmelCase_ :float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_point(lowercase__ , lowercase__ , lowercase__ ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F"""{solution() = }""")
84
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
84
1
"""simple docstring""" from math import asin, atan, cos, radians, sin, sqrt, tan __UpperCAmelCase = 6378137.0 __UpperCAmelCase = 6356752.314245 __UpperCAmelCase = 6_37_81_37 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> float: '''simple docstring''' lowerCAmelCase_ :List[str] = (AXIS_A - AXIS_B) / AXIS_A lowerCAmelCase_ :str = atan((1 - flattening) * tan(radians(lowercase__ ) ) ) lowerCAmelCase_ :str = atan((1 - flattening) * tan(radians(lowercase__ ) ) ) lowerCAmelCase_ :Union[str, Any] = radians(lowercase__ ) lowerCAmelCase_ :List[Any] = radians(lowercase__ ) # Equation lowerCAmelCase_ :Any = sin((phi_a - phi_a) / 2 ) lowerCAmelCase_ :Tuple = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda lowerCAmelCase_ :Any = sqrt(sin_sq_phi + (cos(lowercase__ ) * cos(lowercase__ ) * sin_sq_lambda) ) return 2 * RADIUS * asin(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
84
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
84
1
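The distance function above applies the haversine formula to reduced latitudes (Lambert's method). As a rough plausibility check, plain spherical haversine at the equatorial radius should agree with it to within the ellipsoid's ~0.3% flattening; the San Francisco/New York coordinates below are approximate:

from math import asin, cos, radians, sin, sqrt

def haversine(lat1, lon1, lat2, lon2, radius=6378137.0):
    phi1, phi2 = radians(lat1), radians(lat2)
    d_phi = radians(lat2 - lat1)
    d_lmb = radians(lon2 - lon1)
    a = sin(d_phi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(d_lmb / 2) ** 2
    return 2 * radius * asin(sqrt(a))

# San Francisco to New York is roughly 4.1e6 m along the great circle.
d = haversine(37.774856, -122.424227, 40.713019, -74.012647)
assert 4.0e6 < d < 4.3e6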
"""simple docstring""" import sys def _snake_case ( lowercase__ : Tuple ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = len(lowercase__ ) lowerCAmelCase_ :Dict = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )] lowerCAmelCase_ :Any = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )] for chain_length in range(2 , lowercase__ ): for a in range(1 , n - chain_length + 1 ): lowerCAmelCase_ :str = a + chain_length - 1 lowerCAmelCase_ :Dict = sys.maxsize for c in range(lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: lowerCAmelCase_ :Any = cost lowerCAmelCase_ :Optional[int] = c return matrix, sol def _snake_case ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]: '''simple docstring''' if i == j: print("""A""" + str(lowercase__ ) , end=""" """ ) else: print("""(""" , end=""" """ ) print_optiomal_solution(lowercase__ , lowercase__ , optimal_solution[i][j] ) print_optiomal_solution(lowercase__ , optimal_solution[i][j] + 1 , lowercase__ ) print(""")""" , end=""" """ ) def _snake_case ( ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5] lowerCAmelCase_ :Any = len(lowercase__ ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 lowerCAmelCase_ , lowerCAmelCase_ :Dict = matrix_chain_order(lowercase__ ) print("""No. of Operation required: """ + str(matrix[1][n - 1] ) ) print_optiomal_solution(lowercase__ , 1 , n - 1 ) if __name__ == "__main__": main()
84
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
84
1
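The matrix-chain recurrence above can be checked by hand for three matrices of dimensions 10x30, 30x5 and 5x60 (dimension array [10, 30, 5, 60]): (A1 A2) A3 costs 10*30*5 + 10*5*60 = 4500 scalar multiplications, while A1 (A2 A3) costs 30*5*60 + 10*30*60 = 27000, so the optimum is 4500. A compact cost-only sketch mirroring the same 1-based indexing:

import sys

def matrix_chain_cost(dims):
    n = len(dims)
    cost = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for a in range(1, n - length + 1):
            b = a + length - 1
            cost[a][b] = sys.maxsize
            for c in range(a, b):
                cost[a][b] = min(cost[a][b], cost[a][c] + cost[c + 1][b] + dims[a - 1] * dims[c] * dims[b])
    return cost[1][n - 1]

assert matrix_chain_cost([10, 30, 5, 60]) == 4500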
"""simple docstring""" class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Dict: # we need a list not a string, so do something to change the type lowerCAmelCase_ :List[str] = arr.split(""",""" ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :List[str] = [int(self.array[0] )] * len(self.array ) lowerCAmelCase_ :str = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): lowerCAmelCase_ :List[str] = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) lowerCAmelCase_ :int = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": __UpperCAmelCase = input('please input some numbers:') __UpperCAmelCase = SubArray(whole_array) __UpperCAmelCase = array.solve_sub_array() print(('the results is:', re))
84
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: 
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / 
"""dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ 
:Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) 
return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) 
/ """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
1
"""simple docstring""" def _snake_case ( lowercase__ : Dict , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [1] for i in range(2 , lowercase__ ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" lowerCAmelCase_ :Any = [] lowerCAmelCase_ :str = list(range(lowercase__ ) ) # Find permutation while factorials: lowerCAmelCase_ :Tuple = factorials.pop() lowerCAmelCase_ , lowerCAmelCase_ :Any = divmod(lowercase__ , lowercase__ ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = "data2vec-text" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Dict = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :str = max_position_embeddings lowerCAmelCase_ :int = type_vocab_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :List[Any] = layer_norm_eps lowerCAmelCase_ :List[Any] = position_embedding_type lowerCAmelCase_ :List[Any] = use_cache lowerCAmelCase_ :List[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
84
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
84
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , 
"""convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = 
argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
1
"""simple docstring""" class _SCREAMING_SNAKE_CASE : def __init__( self ) -> int: lowerCAmelCase_ :List[str] = """""" lowerCAmelCase_ :List[Any] = """""" lowerCAmelCase_ :Union[str, Any] = [] def __lowerCAmelCase ( self , __A , __A ) -> int: if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: lowerCAmelCase_ :List[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 ) else: lowerCAmelCase_ :int = self.__min_dist_top_down_dp(__A , n - 1 ) lowerCAmelCase_ :Dict = self.__min_dist_top_down_dp(m - 1 , __A ) lowerCAmelCase_ :Optional[int] = self.__min_dist_top_down_dp(m - 1 , n - 1 ) lowerCAmelCase_ :Tuple = 1 + min(__A , __A , __A ) return self.dp[m][n] def __lowerCAmelCase ( self , __A , __A ) -> int: lowerCAmelCase_ :List[str] = worda lowerCAmelCase_ :int = worda lowerCAmelCase_ :int = [[-1 for _ in range(len(__A ) )] for _ in range(len(__A ) )] return self.__min_dist_top_down_dp(len(__A ) - 1 , len(__A ) - 1 ) def __lowerCAmelCase ( self , __A , __A ) -> int: lowerCAmelCase_ :Optional[Any] = worda lowerCAmelCase_ :Dict = worda lowerCAmelCase_ :str = len(__A ) lowerCAmelCase_ :Union[str, Any] = len(__A ) lowerCAmelCase_ :List[str] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty lowerCAmelCase_ :int = j elif j == 0: # second string is empty lowerCAmelCase_ :int = i elif worda[i - 1] == worda[j - 1]: # last characters are equal lowerCAmelCase_ :Union[str, Any] = self.dp[i - 1][j - 1] else: lowerCAmelCase_ :Union[str, Any] = self.dp[i][j - 1] lowerCAmelCase_ :Tuple = self.dp[i - 1][j] lowerCAmelCase_ :Dict = self.dp[i - 1][j - 1] lowerCAmelCase_ :Tuple = 1 + min(__A , __A , __A ) return self.dp[m][n] if __name__ == "__main__": __UpperCAmelCase = EditDistance() print('****************** Testing Edit Distance DP Algorithm ******************') print() __UpperCAmelCase = input('Enter the first string: ').strip() __UpperCAmelCase = input('Enter the second string: ').strip() print() print(F"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""") print(F"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""") print() print('*************** End of Testing Edit Distance DP Algorithm ***************')
84
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
1
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = "owlvit_text_model" def __init__( self , __A=4_9408 , __A=512 , __A=2048 , __A=12 , __A=8 , __A=16 , __A="quick_gelu" , __A=1E-5 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=0 , __A=4_9406 , __A=4_9407 , **__A , ) -> Union[str, Any]: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :List[str] = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :str = intermediate_size lowerCAmelCase_ :str = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :Any = max_position_embeddings lowerCAmelCase_ :List[str] = hidden_act lowerCAmelCase_ :str = layer_norm_eps lowerCAmelCase_ :Any = attention_dropout lowerCAmelCase_ :str = initializer_range lowerCAmelCase_ :str = initializer_factor @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = cls.get_config_dict(__A , **__A ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": lowerCAmelCase_ :Optional[int] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "owlvit_vision_model" def __init__( self , __A=768 , __A=3072 , __A=12 , __A=12 , __A=3 , __A=768 , __A=32 , __A="quick_gelu" , __A=1E-5 , __A=0.0 , __A=0.0_2 , __A=1.0 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :int = hidden_size lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :Dict = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Any = image_size lowerCAmelCase_ :Union[str, Any] = patch_size lowerCAmelCase_ :Optional[int] = hidden_act lowerCAmelCase_ :int = layer_norm_eps lowerCAmelCase_ :Tuple = attention_dropout lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :Dict = initializer_factor @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": lowerCAmelCase_ :List[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "owlvit" UpperCAmelCase_ :int = True def __init__( self , __A=None , __A=None , __A=512 , __A=2.6_5_9_2 , __A=True , **__A , ) -> Tuple: super().__init__(**__A ) if text_config is None: lowerCAmelCase_ :Optional[Any] = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: lowerCAmelCase_ :List[Any] = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) lowerCAmelCase_ :Tuple = OwlViTTextConfig(**__A ) lowerCAmelCase_ :Dict = OwlViTVisionConfig(**__A ) lowerCAmelCase_ :Any = projection_dim lowerCAmelCase_ :int = logit_scale_init_value lowerCAmelCase_ :List[str] = return_dict lowerCAmelCase_ :Optional[int] = 1.0 @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> "PretrainedConfig": cls._set_token_in_kwargs(__A ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = cls.get_config_dict(__A , **__A ) if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) @classmethod def __lowerCAmelCase ( cls , __A , __A , **__A ) -> Optional[int]: lowerCAmelCase_ :Optional[int] = {} lowerCAmelCase_ :int = text_config lowerCAmelCase_ :Dict = vision_config return cls.from_dict(__A , **__A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :List[Any] = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ :str = self.text_config.to_dict() lowerCAmelCase_ :Tuple = self.vision_config.to_dict() lowerCAmelCase_ :Optional[int] = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4 def __lowerCAmelCase ( self , __A , __A = -1 , __A = -1 , __A = None , ) -> Mapping[str, Any]: lowerCAmelCase_ :Tuple = super().generate_dummy_inputs( processor.tokenizer , batch_size=__A , seq_length=__A , framework=__A ) lowerCAmelCase_ :Tuple = super().generate_dummy_inputs( processor.image_processor , batch_size=__A , framework=__A ) return {**text_input_dict, **image_input_dict} @property def __lowerCAmelCase ( self ) -> int: return 14
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
1
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class _SCREAMING_SNAKE_CASE ( A__ ): def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :int = tempfile.mkdtemp() lowerCAmelCase_ :Any = 5 # Realm tok lowerCAmelCase_ :List[str] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , """realm_tokenizer""" ) os.makedirs(__A , exist_ok=__A ) lowerCAmelCase_ :Optional[Any] = os.path.join(__A , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCAmelCase_ :Union[str, Any] = os.path.join(self.tmpdirname , """realm_block_records""" ) os.makedirs(__A , exist_ok=__A ) def __lowerCAmelCase ( self ) -> RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records ) return config def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :int = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", """Bar"""], ["""Bar"""]], } ) return dataset def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Optional[Any] = np.array( [ b"""This is the first record""", b"""This is the second record""", b"""This is the third record""", b"""This is the fourth record""", b"""This is the fifth record""", b"""This is a longer longer longer record""", ] , dtype=__A , ) return block_records def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Optional[Any] = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = self.get_config() lowerCAmelCase_ :Optional[int] = self.get_dummy_retriever() lowerCAmelCase_ :Tuple = retriever.tokenizer lowerCAmelCase_ :List[Any] = np.array([0, 3] , dtype="""long""" ) lowerCAmelCase_ :Dict = tokenizer(["""Test question"""] ).input_ids lowerCAmelCase_ :List[str] = tokenizer( ["""the fourth"""] , add_special_tokens=__A , return_token_type_ids=__A , return_attention_mask=__A , ).input_ids lowerCAmelCase_ :Optional[Any] = config.reader_seq_len lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Any = retriever( __A , __A , answer_ids=__A , max_length=__A , return_tensors="""np""" ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) 
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[int] = self.get_config() lowerCAmelCase_ :int = self.get_dummy_retriever() lowerCAmelCase_ :Union[str, Any] = retriever.tokenizer lowerCAmelCase_ :Dict = np.array([0, 3, 5] , dtype="""long""" ) lowerCAmelCase_ :Dict = tokenizer(["""Test question"""] ).input_ids lowerCAmelCase_ :Any = tokenizer( ["""the fourth""", """longer longer"""] , add_special_tokens=__A , return_token_type_ids=__A , return_attention_mask=__A , ).input_ids lowerCAmelCase_ :Tuple = config.reader_seq_len lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :int = retriever( __A , __A , answer_ids=__A , max_length=__A , return_tensors="""np""" ) self.assertEqual([False, True, True] , __A ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __A ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __A ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Optional[int] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) # Test local path lowerCAmelCase_ :Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: lowerCAmelCase_ :Union[str, Any] = os.path.join( os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME ) lowerCAmelCase_ :List[str] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
84
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
1
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
84
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
1
"""simple docstring""" def _snake_case ( lowercase__ : str ) -> int: '''simple docstring''' assert column_title.isupper() lowerCAmelCase_ :Dict = 0 lowerCAmelCase_ :int = len(lowercase__ ) - 1 lowerCAmelCase_ :Optional[Any] = 0 while index >= 0: lowerCAmelCase_ :str = (ord(column_title[index] ) - 6_4) * pow(2_6 , lowercase__ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
84
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
1
"""simple docstring""" import os def _snake_case ( lowercase__ : str = "matrix.txt" ) -> int: '''simple docstring''' with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as in_file: lowerCAmelCase_ :str = in_file.read() lowerCAmelCase_ :Tuple = [[int(lowercase__ ) for cell in row.split(""",""" )] for row in data.strip().splitlines()] lowerCAmelCase_ :Tuple = [[0 for cell in row] for row in grid] lowerCAmelCase_ :str = len(grid[0] ) lowerCAmelCase_ :Union[str, Any] = [[0 for i in range(lowercase__ )] for j in range(lowercase__ )] lowerCAmelCase_ :Optional[Any] = grid[0][0] for i in range(1 , lowercase__ ): lowerCAmelCase_ :Optional[int] = grid[0][i] + dp[0][i - 1] for i in range(1 , lowercase__ ): lowerCAmelCase_ :str = grid[i][0] + dp[i - 1][0] for i in range(1 , lowercase__ ): for j in range(1 , lowercase__ ): lowerCAmelCase_ :Dict = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F"""{solution() = }""")
84
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
1
"""simple docstring""" def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' if not all(char in """01""" for char in bin_string ): raise ValueError("""Non-binary value was passed to the function""" ) if not bin_string: raise ValueError("""Empty string was passed to the function""" ) lowerCAmelCase_ :Optional[int] = """""" while len(lowercase__ ) % 3 != 0: lowerCAmelCase_ :str = """0""" + bin_string lowerCAmelCase_ :Optional[int] = [ bin_string[index : index + 3] for index in range(len(lowercase__ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: lowerCAmelCase_ :Tuple = 0 for index, val in enumerate(lowercase__ ): oct_val += int(2 ** (2 - index) * int(lowercase__ ) ) oct_string += str(lowercase__ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
84
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
1
"""simple docstring""" def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Dict ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Any = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = 0 while b > 0: if b & 1: lowerCAmelCase_ :str = ((res % c) + (a % c)) % c a += a b >>= 1 return res
84
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , 
beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with 
tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
1
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __UpperCAmelCase = 'platform' import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Optional[Any]=None , lowercase__ : Optional[int]=None , lowercase__ : int=None , lowercase__ : List[Any]=None , lowercase__ : Optional[int]=None , lowercase__ : Dict=None , ) -> int: '''simple docstring''' if attention_mask is None: lowerCAmelCase_ :Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase_ :List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase_ :Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase_ :Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase_ :Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=16 , __A=2 , __A=4 , __A=4 , __A="gelu" , __A=0.1 , __A=0.1 , __A=32 , __A=2 , __A=1 , __A=0 , __A=0.0_2 , ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = parent lowerCAmelCase_ :Optional[Any] = batch_size lowerCAmelCase_ :Union[str, Any] = seq_length lowerCAmelCase_ :Dict = is_training lowerCAmelCase_ :Any = use_labels lowerCAmelCase_ :Union[str, Any] = vocab_size lowerCAmelCase_ :Tuple = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :int = intermediate_size lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :int = hidden_dropout_prob lowerCAmelCase_ :Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase_ :Optional[int] = max_position_embeddings lowerCAmelCase_ :Optional[Any] = eos_token_id lowerCAmelCase_ :List[Any] = pad_token_id lowerCAmelCase_ :int = bos_token_id lowerCAmelCase_ :Any = initializer_range def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Union[str, Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCAmelCase_ :str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) lowerCAmelCase_ :List[Any] = shift_tokens_right(__A , 1 , 2 ) lowerCAmelCase_ :Optional[int] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , 
decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__A , ) lowerCAmelCase_ :Dict = prepare_blenderbot_inputs_dict(__A , __A , __A ) return config, inputs_dict def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCAmelCase ( self , __A , __A , __A ) -> Union[str, Any]: lowerCAmelCase_ :int = 20 lowerCAmelCase_ :Tuple = model_class_name(__A ) lowerCAmelCase_ :int = model.encode(inputs_dict["""input_ids"""] ) lowerCAmelCase_ , lowerCAmelCase_ :str = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowerCAmelCase_ :Any = model.init_cache(decoder_input_ids.shape[0] , __A , __A ) lowerCAmelCase_ :Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) lowerCAmelCase_ :Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase_ :Optional[Any] = model.decode( decoder_input_ids[:, :-1] , __A , decoder_attention_mask=__A , past_key_values=__A , decoder_position_ids=__A , ) lowerCAmelCase_ :Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCAmelCase_ :int = model.decode( decoder_input_ids[:, -1:] , __A , decoder_attention_mask=__A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__A , ) lowerCAmelCase_ :Any = model.decode(__A , __A ) lowerCAmelCase_ :str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def __lowerCAmelCase ( self , __A , __A , __A ) -> str: lowerCAmelCase_ :Union[str, Any] = 20 lowerCAmelCase_ :List[Any] = model_class_name(__A ) lowerCAmelCase_ :Any = model.encode(inputs_dict["""input_ids"""] ) lowerCAmelCase_ , lowerCAmelCase_ :Any = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowerCAmelCase_ :str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase_ :Optional[int] = model.init_cache(decoder_input_ids.shape[0] , __A , __A ) lowerCAmelCase_ :str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase_ :Optional[Any] = model.decode( decoder_input_ids[:, :-1] , __A , decoder_attention_mask=__A , past_key_values=__A , decoder_position_ids=__A , ) lowerCAmelCase_ :Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCAmelCase_ :str = model.decode( decoder_input_ids[:, -1:] , __A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__A , decoder_position_ids=__A , ) lowerCAmelCase_ :Optional[int] = model.decode(__A , __A , decoder_attention_mask=__A ) lowerCAmelCase_ :List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is 
{diff}""" ) @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): UpperCAmelCase_ :Any = 99 def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase_ :int = input_ids.shape[0] lowerCAmelCase_ :List[str] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Dict = self._get_config_and_data() lowerCAmelCase_ :Tuple = FlaxBlenderbotSmallForConditionalGeneration(__A ) lowerCAmelCase_ :Union[str, Any] = lm_model(input_ids=__A ) lowerCAmelCase_ :Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowerCAmelCase_ :str = FlaxBlenderbotSmallForConditionalGeneration(__A ) lowerCAmelCase_ :List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) lowerCAmelCase_ :List[str] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase_ :Dict = lm_model(input_ids=__A , decoder_input_ids=__A ) lowerCAmelCase_ :List[Any] = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , __A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) lowerCAmelCase_ :int = shift_tokens_right(__A , 1 , 2 ) lowerCAmelCase_ :Tuple = np.equal(__A , 1 ).astype(np.floataa ).sum() lowerCAmelCase_ :List[Any] = np.equal(__A , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(__A , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase , A__ ): UpperCAmelCase_ :Dict = True UpperCAmelCase_ :int = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) UpperCAmelCase_ :int = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = FlaxBlenderbotSmallModelTester(self ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__A , __A , __A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: 
self.model_tester.check_use_cache_forward_with_attn_mask(__A , __A , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase_ :Optional[int] = self._prepare_for_class(__A , __A ) lowerCAmelCase_ :Optional[int] = model_class(__A ) @jax.jit def encode_jitted(__A , __A=None , **__A ): return model.encode(input_ids=__A , attention_mask=__A ) with self.subTest("""JIT Enabled""" ): lowerCAmelCase_ :str = encode_jitted(**__A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCAmelCase_ :Dict = encode_jitted(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) ) for jitted_output, output in zip(__A , __A ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase_ :Any = model_class(__A ) lowerCAmelCase_ :List[str] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) lowerCAmelCase_ :str = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__A , __A , __A ): return model.decode( decoder_input_ids=__A , decoder_attention_mask=__A , encoder_outputs=__A , ) with self.subTest("""JIT Enabled""" ): lowerCAmelCase_ :Dict = decode_jitted(**__A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCAmelCase_ :Optional[Any] = decode_jitted(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) ) for jitted_output, output in zip(__A , __A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCAmelCase ( self ) -> Optional[int]: for model_class_name in self.all_model_classes: lowerCAmelCase_ :Optional[Any] = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase_ :List[Any] = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase_ :Any = model(__A ) self.assertIsNotNone(__A )
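# A minimal sketch of the `shift_tokens_right` behaviour exercised in the
# tests above, assuming flax is available: the decoder input is the target
# sequence shifted one slot right, beginning with the decoder start token.
#
#   demo = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
#   shifted = shift_tokens_right(demo, pad_token_id=1, decoder_start_token_id=2)
#   assert (shifted[:, 0] == 2).all()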
84
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
1
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Any: '''simple docstring''' lowerCAmelCase_ :Optional[int] = AutoTokenizer.from_pretrained(lowercase__ ) lowerCAmelCase_ :int = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ :List[str] = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" ) return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCAmelCase_ :int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :Optional[int] = config["""lr"""] lowerCAmelCase_ :str = int(config["""num_epochs"""] ) lowerCAmelCase_ :Any = int(config["""seed"""] ) lowerCAmelCase_ :Optional[int] = int(config["""batch_size"""] ) lowerCAmelCase_ :List[Any] = args.model_name_or_path set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer lowerCAmelCase_ :List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ :Dict = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCAmelCase_ :List[Any] = 1 lowerCAmelCase_ :List[Any] = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , ) else: lowerCAmelCase_ :Dict = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ :List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ :Tuple = 0 # Now we train the model lowerCAmelCase_ :Any = evaluate.load("""glue""" , """mrpc""" ) lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :int = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): lowerCAmelCase_ :Optional[Any] = model(**lowercase__ ) lowerCAmelCase_ :int = outputs.loss lowerCAmelCase_ :Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowerCAmelCase_ :Dict = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Union[str, Any] = model(**lowercase__ ) lowerCAmelCase_ :Tuple = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCAmelCase_ , lowerCAmelCase_ :Tuple = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: lowerCAmelCase_ :Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) lowerCAmelCase_ :int = eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: lowerCAmelCase_ :Dict = eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ) -> str: '''simple docstring''' lowerCAmelCase_ :Dict = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , ) parser.add_argument( """--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=lowercase__ , default=lowercase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=lowercase__ , default=3 , help="""Number of train epochs.""" , ) lowerCAmelCase_ :Tuple = parser.parse_args() lowerCAmelCase_ :Optional[int] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
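# A hedged launch sketch for the training script above; the flags come from
# its argparse definition, and running it under `accelerate launch`
# (optionally with a DeepSpeed config file, whose name here is an assumption)
# is one plausible invocation:
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --output_dir . --num_epochs 3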
84
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
1
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = cipher_alphabet or [chr(lowercase__ ) for i in range(9_7 , 1_2_3 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCAmelCase_ :Optional[Any] = { """a""": 0.08497, """b""": 0.01492, """c""": 0.02202, """d""": 0.04253, """e""": 0.11162, """f""": 0.02228, """g""": 0.02015, """h""": 0.06094, """i""": 0.07546, """j""": 0.00153, """k""": 0.01292, """l""": 0.04025, """m""": 0.02406, """n""": 0.06749, """o""": 0.07507, """p""": 0.01929, """q""": 0.00095, """r""": 0.07587, """s""": 0.06327, """t""": 0.09356, """u""": 0.02758, """v""": 0.00978, """w""": 0.02560, """x""": 0.00150, """y""": 0.01994, """z""": 0.00077, } else: # Custom frequencies dictionary lowerCAmelCase_ :List[str] = frequencies_dict if not case_sensitive: lowerCAmelCase_ :Optional[Any] = ciphertext.lower() # Chi squared statistic values lowerCAmelCase_ :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(lowercase__ ) ): lowerCAmelCase_ :Tuple = """""" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCAmelCase_ :Any = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCAmelCase_ :Optional[Any] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCAmelCase_ :List[str] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowerCAmelCase_ :Tuple = decrypted_with_shift.lower().count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCAmelCase_ :List[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCAmelCase_ :Optional[Any] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCAmelCase_ :List[Any] = decrypted_with_shift.count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCAmelCase_ :str = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCAmelCase_ :Tuple = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCAmelCase_ :Dict = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCAmelCase_ :int = min( lowercase__ , key=lowercase__ , ) # Get all the data from the most likely cipher 
(key, decoded message) ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) :Union[str, Any] = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
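# A small usage sketch for the chi-squared Caesar breaker above; the
# ciphertext is an illustrative assumption ("hello world" shifted by 3) and
# the function returns a (shift, chi_squared_value, decoded_text) tuple.
#
#   shift, chi_squared, decoded = _snake_case("khoor zruog")
#   # likely recovers shift 3 and decodes to "hello world"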
84
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx" def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :List[Any] = torch.manual_seed(__A ) lowerCAmelCase_ :Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] 
= OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Dict = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :List[str] = init_image.resize((128, 128) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( 
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :int = output.images lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
1
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __UpperCAmelCase = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json' with io.open(filename, 'r', encoding='utf-8') as f: __UpperCAmelCase = json.load(f) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self , __A ) -> int: return FSMTTokenizer.from_pretrained(__A ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :List[str] = FSMTForConditionalGeneration.from_pretrained(__A ).to(__A ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 2_6.0], ["""ru-en""", 2_2.0], ["""en-de""", 2_2.0], ["""de-en""", 2_9.0], ] ) @slow def __lowerCAmelCase ( self , __A , __A ) -> Tuple: # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase_ :List[str] = f"""facebook/wmt19-{pair}""" lowerCAmelCase_ :Optional[Any] = self.get_tokenizer(__A ) lowerCAmelCase_ :Tuple = self.get_model(__A ) lowerCAmelCase_ :List[str] = bleu_data[pair]["""src"""] lowerCAmelCase_ :List[str] = bleu_data[pair]["""tgt"""] lowerCAmelCase_ :Optional[Any] = tokenizer(__A , return_tensors="""pt""" , truncation=__A , padding="""longest""" ).to(__A ) lowerCAmelCase_ :Optional[int] = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase_ :List[str] = tokenizer.batch_decode( __A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) lowerCAmelCase_ :List[str] = calculate_bleu(__A , __A ) print(__A ) self.assertGreaterEqual(scores["""bleu"""] , __A )
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , ) assert hasattr(self , """env""" ) def __lowerCAmelCase ( self , __A ) -> Any: # configuration for running training on smdistributed Model Parallel lowerCAmelCase_ :Union[str, Any] = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase_ :Tuple = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , ) def __lowerCAmelCase ( self , __A ) -> List[Any]: TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def __lowerCAmelCase ( self , __A ) -> List[str]: # create estimator lowerCAmelCase_ :Any = self.create_estimator(__A ) # run training estimator.fit() # result dataframe lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase_ :Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= 
self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: 
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / 
"""dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ 
:Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) 
return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) 
/ """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0 ) -> str: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError("""Invalid input""" ) lowerCAmelCase_ :List[str] = 1_0**n lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _snake_case ( ) -> Dict: '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join lowerCAmelCase_ :List[str] = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , lowercase__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _snake_case ( ) -> List[Any]: '''simple docstring''' assert _test_patching.open is open lowerCAmelCase_ :Optional[Any] = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , lowercase__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , lowercase__ ): pass def _snake_case ( ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , lowercase__ ) is None with patch_submodule(_test_patching , """len""" , lowercase__ ): assert _test_patching.len is mock assert _test_patching.len is len def _snake_case ( ) -> 
Dict: '''simple docstring''' lowerCAmelCase_ :str = """__test_patch_submodule_start_and_stop_mock__""" lowerCAmelCase_ :List[str] = patch_submodule(_test_patching , """open""" , lowercase__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _snake_case ( ) -> int: '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join lowerCAmelCase_ :Optional[int] = """__test_patch_submodule_successive_join__""" lowerCAmelCase_ :Union[str, Any] = """__test_patch_submodule_successive_dirname__""" lowerCAmelCase_ :Tuple = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , lowercase__ ): with patch_submodule(_test_patching , """os.rename""" , lowercase__ ): with patch_submodule(_test_patching , """os.path.dirname""" , lowercase__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , lowercase__ ): with patch_submodule(_test_patching , """os.path.join""" , lowercase__ ): with patch_submodule(_test_patching , """os.path.dirname""" , lowercase__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _snake_case ( ) -> str: '''simple docstring''' lowerCAmelCase_ :int = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , lowercase__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , lowercase__ ): pass
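# --- Added usage sketch (not part of the test module above) -------------------
# patch_submodule in one self-contained example, mirroring what the tests
# assert: inside the context every route to os.path.join resolves to the mock,
# and the module is restored on exit. _demo_patch is a hypothetical helper and
# assumes this file lives in the same package as _test_patching.
def _demo_patch():
    from datasets.utils.patching import patch_submodule

    from . import _test_patching

    mock = "__demo_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        assert _test_patching.os.path.join is mock
    assert _test_patching.os.path.join is not mock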
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = 'src/transformers' __UpperCAmelCase = 'docs/source/en/tasks' def _snake_case ( lowercase__ : str , lowercase__ : List[str] , lowercase__ : Any ) -> str: '''simple docstring''' with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCAmelCase_ :List[Any] = f.readlines() # Find the start prompt. lowerCAmelCase_ :Tuple = 0 while not lines[start_index].startswith(lowercase__ ): start_index += 1 start_index += 1 lowerCAmelCase_ :Dict = start_index while not lines[end_index].startswith(lowercase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase_ :List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase__ , set() ) lowerCAmelCase_ :Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def _snake_case ( lowercase__ : int , lowercase__ : str=False ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = _find_text_in_file( filename=os.path.join(lowercase__ , lowercase__ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) lowerCAmelCase_ :int = get_model_list_for_task(lowercase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase__ , lowercase__ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" """ to fix this.""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
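# --- Added usage note (not part of the script above) --------------------------
# Typical invocations from the repository root, per the script's own header
# comment and argparse flags; the second form rewrites out-of-date model lists
# in the task guides in place instead of raising:
#
#   python utils/check_task_guides.py
#   python utils/check_task_guides.py --fix_and_overwrite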
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
"""simple docstring""" def _snake_case ( lowercase__ : list[int] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] if len(lowercase__ ) == 1: return [nums.copy()] for _ in range(len(lowercase__ ) ): lowerCAmelCase_ :Optional[Any] = nums.pop(0 ) lowerCAmelCase_ :str = permute(lowercase__ ) for perm in permutations: perm.append(lowercase__ ) result.extend(lowercase__ ) nums.append(lowercase__ ) return result def _snake_case ( lowercase__ : Tuple ) -> List[str]: '''simple docstring''' def backtrack(lowercase__ : str ): if start == len(lowercase__ ) - 1: output.append(nums[:] ) else: for i in range(lowercase__ , len(lowercase__ ) ): lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] backtrack(start + 1 ) lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] # backtrack lowerCAmelCase_ :int = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCAmelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
"""simple docstring""" import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS} def _snake_case ( lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Tuple , lowercase__ : List[str] ) -> Optional[int]: '''simple docstring''' if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: lowerCAmelCase_ :Dict = TOKENIZER_CLASSES else: lowerCAmelCase_ :List[Any] = {tokenizer_name: getattr(lowercase__ , tokenizer_name + """Fast""" )} logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: lowerCAmelCase_ :List[Any] = TOKENIZER_CLASSES[tokenizer_name] lowerCAmelCase_ :Union[str, Any] = True if checkpoint_name is None: lowerCAmelCase_ :str = list(tokenizer_class.max_model_input_sizes.keys() ) else: lowerCAmelCase_ :int = [checkpoint_name] logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer lowerCAmelCase_ :Dict = tokenizer_class.from_pretrained(lowercase__ , force_download=lowercase__ ) # Save fast tokenizer logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = checkpoint.split("""/""" ) lowerCAmelCase_ :Optional[int] = os.path.join(lowercase__ , lowercase__ ) elif add_prefix: lowerCAmelCase_ :Tuple = checkpoint lowerCAmelCase_ :Any = dump_path else: lowerCAmelCase_ :Union[str, Any] = None lowerCAmelCase_ :Any = dump_path logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: lowerCAmelCase_ :Optional[int] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] lowerCAmelCase_ :Tuple = file_path.split(lowercase__ )[-1][0] if next_char == "/": lowerCAmelCase_ :Tuple = os.path.join(lowercase__ , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = None logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) lowerCAmelCase_ :Any = tokenizer.save_pretrained( lowercase__ , legacy_format=lowercase__ , filename_prefix=lowercase__ ) logger.info(f"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith("""tokenizer.json""" ): os.remove(lowercase__ ) logger.info(f"""=> removing {file_name}""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.' ) parser.add_argument( '--tokenizer_name', default=None, type=str, help=( F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """ 'download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--checkpoint_name', default=None, type=str, help='Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.', ) parser.add_argument( '--force_download', action='store_true', help='Re-download checkpoints.', ) __UpperCAmelCase = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
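# --- Added usage note (not part of the script above) --------------------------
# A representative invocation; the tokenizer/checkpoint pair and dump path are
# placeholders, and the script path depends on where it lives in your checkout.
# --tokenizer_name must be a key of SLOW_TO_FAST_CONVERTERS (e.g. BertTokenizer):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path /tmp/fast_tokenizers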
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self , __A , __A=7 , __A=3 , __A=10 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=None , ) -> int: lowerCAmelCase_ :List[Any] = size if size is not None else {"""shortest_edge""": 18} lowerCAmelCase_ :int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCAmelCase_ :List[str] = parent lowerCAmelCase_ :Dict = batch_size lowerCAmelCase_ :List[Any] = num_channels lowerCAmelCase_ :int = num_frames lowerCAmelCase_ :str = image_size lowerCAmelCase_ :Optional[Any] = min_resolution lowerCAmelCase_ :str = max_resolution lowerCAmelCase_ :Dict = do_resize lowerCAmelCase_ :Union[str, Any] = size lowerCAmelCase_ :int = do_normalize lowerCAmelCase_ :str = image_mean lowerCAmelCase_ :Optional[int] = image_std lowerCAmelCase_ :List[str] = crop_size def __lowerCAmelCase ( self ) -> Tuple: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Optional[int] = VivitImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = VivitImageProcessingTester(self ) @property def __lowerCAmelCase ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , """image_mean""" ) ) self.assertTrue(hasattr(__A , """image_std""" ) ) self.assertTrue(hasattr(__A , """do_normalize""" ) ) self.assertTrue(hasattr(__A , """do_resize""" ) ) self.assertTrue(hasattr(__A , """do_center_crop""" ) ) self.assertTrue(hasattr(__A , """size""" ) ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) lowerCAmelCase_ :Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Initialize image_processing lowerCAmelCase_ :Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos lowerCAmelCase_ :Optional[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A ) for video in video_inputs: self.assertIsInstance(__A , __A ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input lowerCAmelCase_ :Dict = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, 
self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCAmelCase_ :int = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Initialize image_processing lowerCAmelCase_ :Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ :List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for video in video_inputs: self.assertIsInstance(__A , __A ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input lowerCAmelCase_ :str = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCAmelCase_ :Optional[Any] = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def __lowerCAmelCase ( self ) -> str: # Initialize image_processing lowerCAmelCase_ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ :int = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for video in video_inputs: self.assertIsInstance(__A , __A ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input lowerCAmelCase_ :Union[str, Any] = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCAmelCase_ :List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
"""simple docstring""" from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar __UpperCAmelCase = TypeVar('T') class _SCREAMING_SNAKE_CASE ( Generic[T] ): UpperCAmelCase_ :deque[T] # Cache store of keys UpperCAmelCase_ :set[T] # References of the keys in cache UpperCAmelCase_ :int = 10 # Maximum capacity of cache def __init__( self , __A ) -> None: lowerCAmelCase_ :Tuple = deque() lowerCAmelCase_ :Any = set() if not n: lowerCAmelCase_ :str = sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: lowerCAmelCase_ :Any = n def __lowerCAmelCase ( self , __A ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: lowerCAmelCase_ :int = self.dq_store.pop() self.key_reference.remove(__A ) else: self.dq_store.remove(__A ) self.dq_store.appendleft(__A ) self.key_reference.add(__A ) def __lowerCAmelCase ( self ) -> None: for k in self.dq_store: print(__A ) def __repr__( self ) -> str: return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
"""simple docstring""" import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class _SCREAMING_SNAKE_CASE ( A__ ): def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :str = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def __lowerCAmelCase ( self ) -> str: with self.assertRaises(__A ): lowerCAmelCase_ :Any = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def __lowerCAmelCase ( self ) -> str: with self.assertRaises(__A ): lowerCAmelCase_ :Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __lowerCAmelCase ( self ) -> Dict: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): lowerCAmelCase_ :Optional[int] = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :int = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): lowerCAmelCase_ :List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Union[str, Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def __lowerCAmelCase ( self ) -> Optional[Any]: import PIL.Image lowerCAmelCase_ :Dict = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=__A ) as mock_cast_to_python_objects: lowerCAmelCase_ :Dict = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) ) lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , __A ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : int ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :str = pa.BufferReader(lowercase__ ) if isinstance(lowercase__ , pa.Buffer ) else 
pa.memory_map(lowercase__ ) lowerCAmelCase_ :Any = pa.ipc.open_stream(lowercase__ ) lowerCAmelCase_ :pa.Table = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _snake_case ( lowercase__ : int , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = pa.BufferOutputStream() lowerCAmelCase_ :Any = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) lowerCAmelCase_ , lowerCAmelCase_ :Any = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ :str = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _snake_case ( ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Dict = pa.BufferOutputStream() lowerCAmelCase_ :Dict = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=lowercase__ , features=lowercase__ ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) lowerCAmelCase_ , lowerCAmelCase_ :str = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata lowerCAmelCase_ :List[str] = pa.BufferReader(output.getvalue() ) lowerCAmelCase_ :Tuple = pa.ipc.open_stream(lowercase__ ) lowerCAmelCase_ :pa.Table = f.read_all() lowerCAmelCase_ :List[str] = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(lowercase__ ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) def _snake_case ( lowercase__ : Any ) -> str: '''simple docstring''' lowerCAmelCase_ :str = pa.BufferOutputStream() with ArrowWriter( stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer: with pytest.raises(lowercase__ ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] ) def _snake_case ( lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = pa.BufferOutputStream() with ArrowWriter( stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer: with pytest.raises(lowercase__ ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 ) lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] ) def _snake_case ( lowercase__ : Tuple ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = pa.BufferOutputStream() with ArrowWriter( stream=lowercase__ , 
writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = pa.BufferOutputStream() lowerCAmelCase_ :str = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ :Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = pa.BufferOutputStream() lowerCAmelCase_ :Optional[Any] = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ :Dict = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :int = pa.BufferOutputStream() lowerCAmelCase_ :Dict = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ :str = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ 
, metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ :List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} lowerCAmelCase_ :Any = os.path.join(lowercase__ , """test.arrow""" ) with ArrowWriter(path=lowercase__ , schema=pa.schema(lowercase__ ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(lowercase__ , 1 ) def _snake_case ( lowercase__ : Optional[int] ) -> int: '''simple docstring''' if pa.types.is_list(lowercase__ ): return get_base_dtype(arr_type.value_type ) else: return arr_type def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if isinstance(lst[0] , lowercase__ ): change_first_primitive_element_in_list(lst[0] , lowercase__ ) else: lowerCAmelCase_ :Dict = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _snake_case ( lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Dict ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = pa.array(TypedSequence(lowercase__ , optimized_int_type=lowercase__ ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _snake_case ( lowercase__ : int , lowercase__ : List[str] , lowercase__ : int ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = pa.array(OptimizedTypedSequence(lowercase__ , col=lowercase__ ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications lowerCAmelCase_ :int = copy.deepcopy(lowercase__ ) lowerCAmelCase_ :Tuple = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[Any] = pa.array(OptimizedTypedSequence(lowercase__ , col=lowercase__ ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def _snake_case ( lowercase__ : int , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=lowercase__ ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def _snake_case ( lowercase__ : Optional[Any] ) -> int: '''simple docstring''' lowerCAmelCase_ :Tuple = """mock://dataset-train.arrow""" with ArrowWriter(path=lowercase__ , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(lowercase__ ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) 
writer.write({"""col_1""": """bar""", """col_2""": 2} ) lowerCAmelCase_ , lowerCAmelCase_ :str = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(lowercase__ ) def _snake_case ( ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = pa.BufferOutputStream() with ParquetWriter(stream=lowercase__ ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize() assert num_examples == 2 assert num_bytes > 0 lowerCAmelCase_ :Any = pa.BufferReader(output.getvalue() ) lowerCAmelCase_ :pa.Table = pq.read_table(lowercase__ ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def _snake_case ( lowercase__ : str , lowercase__ : List[Any] ) -> List[Any]: '''simple docstring''' import PIL.Image lowerCAmelCase_ :int = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowercase__ , format="""png""" ) lowerCAmelCase_ :List[str] = pa.BufferOutputStream() with ParquetWriter( stream=lowercase__ , features=Features({"""image""": Image()} ) , embed_local_files=lowercase__ ) as writer: writer.write({"""image""": image_path} ) writer.finalize() lowerCAmelCase_ :str = pa.BufferReader(output.getvalue() ) lowerCAmelCase_ :pa.Table = pq.read_table(lowercase__ ) lowerCAmelCase_ :List[str] = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , lowercase__ ) with open(lowercase__ , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Any = pa.schema([pa.field("""col_1""" , pa.string() , nullable=lowercase__ )] ) lowerCAmelCase_ :Dict = pa.BufferOutputStream() with ArrowWriter(stream=lowercase__ ) as writer: writer._build_writer(inferred_schema=lowercase__ ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
84
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: 
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / 
"""dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ 
:Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) 
return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) 
/ """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
1
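The two rows above exercise datasets.arrow_writer.ArrowWriter and the pytest fixtures that feed it. A minimal round-trip sketch, using only calls that already appear in those tests: write two rows to an in-memory Arrow stream, finalize, and read the table back through pyarrow IPC.

import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()  # (2, <positive byte count>)

# Read the stream back, as _check_output does in the test file.
table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}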
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> Union[str, Any]: lowerCAmelCase_ :str = parent lowerCAmelCase_ :str = 13 lowerCAmelCase_ :Tuple = 7 lowerCAmelCase_ :Tuple = True lowerCAmelCase_ :List[str] = True lowerCAmelCase_ :int = True lowerCAmelCase_ :str = True lowerCAmelCase_ :List[str] = 99 lowerCAmelCase_ :Tuple = 384 lowerCAmelCase_ :Any = 2 lowerCAmelCase_ :int = 4 lowerCAmelCase_ :Any = 37 lowerCAmelCase_ :Any = """gelu""" lowerCAmelCase_ :Tuple = 0.1 lowerCAmelCase_ :str = 0.1 lowerCAmelCase_ :Dict = 512 lowerCAmelCase_ :Dict = 16 lowerCAmelCase_ :Tuple = 2 lowerCAmelCase_ :List[Any] = 0.0_2 lowerCAmelCase_ :Optional[int] = 3 lowerCAmelCase_ :Tuple = 4 lowerCAmelCase_ :Tuple = 128 lowerCAmelCase_ :Any = 2 lowerCAmelCase_ :Optional[Any] = 9 lowerCAmelCase_ :List[str] = 1 lowerCAmelCase_ :List[str] = None def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ :int = None if self.use_input_mask: lowerCAmelCase_ :str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ :List[str] = None if self.use_token_type_ids: lowerCAmelCase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ :Union[str, Any] = None lowerCAmelCase_ :int = None lowerCAmelCase_ :Optional[Any] = None if self.use_labels: lowerCAmelCase_ :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ :str = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ :List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = TFConvBertModel(config=__A ) lowerCAmelCase_ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCAmelCase_ :List[str] = [input_ids, input_mask] lowerCAmelCase_ :List[str] = model(__A ) lowerCAmelCase_ :str = 
model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> str: lowerCAmelCase_ :List[Any] = TFConvBertForMaskedLM(config=__A ) lowerCAmelCase_ :Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase_ :Optional[int] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> int: lowerCAmelCase_ :Any = self.num_labels lowerCAmelCase_ :List[str] = TFConvBertForSequenceClassification(config=__A ) lowerCAmelCase_ :List[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase_ :Dict = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Dict: lowerCAmelCase_ :Any = self.num_choices lowerCAmelCase_ :int = TFConvBertForMultipleChoice(config=__A ) lowerCAmelCase_ :List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase_ :Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase_ :Dict = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase_ :Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } lowerCAmelCase_ :Any = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[Any] = self.num_labels lowerCAmelCase_ :Tuple = TFConvBertForTokenClassification(config=__A ) lowerCAmelCase_ :Union[str, Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase_ :Dict = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :str = TFConvBertForQuestionAnswering(config=__A ) lowerCAmelCase_ :List[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase_ :int = model(__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) :List[Any] = config_and_inputs lowerCAmelCase_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :Union[str, Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase_ :str = ( { "feature-extraction": 
TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase_ :Union[str, Any] = False UpperCAmelCase_ :List[str] = False UpperCAmelCase_ :Union[str, Any] = False def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = TFConvBertModelTester(self ) lowerCAmelCase_ :Optional[int] = ConfigTester(self , config_class=__A , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__A ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__A ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__A ) @slow def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ :Optional[Any] = True lowerCAmelCase_ :Optional[Any] = True if hasattr(__A , """use_cache""" ): lowerCAmelCase_ :Dict = True lowerCAmelCase_ :Tuple = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCAmelCase_ :str = getattr(self.model_tester , """key_length""" , __A ) for model_class in self.all_model_classes: lowerCAmelCase_ :Tuple = self._prepare_for_class(__A , __A ) lowerCAmelCase_ :Optional[Any] = model_class(__A ) lowerCAmelCase_ :List[str] = len(model(__A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A , saved_model=__A ) lowerCAmelCase_ :Optional[int] = os.path.join(__A , """saved_model""" , """1""" ) lowerCAmelCase_ :int = tf.keras.models.load_model(__A ) lowerCAmelCase_ :Optional[int] = model(__A ) if self.is_encoder_decoder: lowerCAmelCase_ :Optional[int] = outputs["""encoder_hidden_states"""] lowerCAmelCase_ :int = outputs["""encoder_attentions"""] else: lowerCAmelCase_ :int = outputs["""hidden_states"""] lowerCAmelCase_ :Optional[int] = outputs["""attentions"""] self.assertEqual(len(__A ) , __A ) lowerCAmelCase_ :Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__A ) , __A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow 
def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(__A ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ :List[str] = True lowerCAmelCase_ :List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) lowerCAmelCase_ :Dict = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCAmelCase_ :Tuple = getattr(self.model_tester , """key_length""" , __A ) lowerCAmelCase_ :List[Any] = getattr(self.model_tester , """key_length""" , __A ) def check_decoder_attentions_output(__A ): lowerCAmelCase_ :List[str] = len(__A ) self.assertEqual(out_len % 2 , 0 ) lowerCAmelCase_ :Tuple = outputs.decoder_attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__A ): lowerCAmelCase_ :List[str] = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCAmelCase_ :Optional[int] = True lowerCAmelCase_ :Tuple = False lowerCAmelCase_ :Optional[int] = model_class(__A ) lowerCAmelCase_ :Union[str, Any] = model(self._prepare_for_class(__A , __A ) ) lowerCAmelCase_ :Any = len(__A ) self.assertEqual(config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) if self.is_encoder_decoder: lowerCAmelCase_ :Any = model_class(__A ) lowerCAmelCase_ :Optional[Any] = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(config.output_hidden_states , __A ) check_decoder_attentions_output(__A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCAmelCase_ :List[str] = True lowerCAmelCase_ :Union[str, Any] = model_class(__A ) lowerCAmelCase_ :Tuple = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) # Check attention is always last and order is fine lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :Tuple = True lowerCAmelCase_ :List[Any] = model_class(__A ) lowerCAmelCase_ :Tuple = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) ) self.assertEqual(model.config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) lowerCAmelCase_ :List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase_ :List[Any] = model(__A )[0] lowerCAmelCase_ :Tuple = [1, 6, 768] self.assertEqual(output.shape , __A ) lowerCAmelCase_ :List[Any] = tf.constant( [ [ [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2], [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4], [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1E-4 )
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = "data2vec-text" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Dict = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :str = max_position_embeddings lowerCAmelCase_ :int = type_vocab_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :List[Any] = layer_norm_eps lowerCAmelCase_ :List[Any] = position_embedding_type lowerCAmelCase_ :List[Any] = use_cache lowerCAmelCase_ :List[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
84
1
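The row above defines the data2vec-text configuration in the standard transformers PretrainedConfig style. A minimal usage sketch, assuming the de-obfuscated Data2VecTextConfig class from that row (the released transformers library exports a class of the same name):

from transformers import Data2VecTextConfig

config = Data2VecTextConfig(
    vocab_size=30522,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
)
# Defaults taken straight from the __init__ signature above.
print(config.hidden_size, config.position_embedding_type)  # 768 absolute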
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __UpperCAmelCase = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ): UpperCAmelCase_ :Optional[datasets.Features] = None def _snake_case ( lowercase__ : "pyspark.sql.DataFrame" , lowercase__ : List[int] , ) -> Any: '''simple docstring''' import pyspark def generate_fn(): lowerCAmelCase_ :List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: lowerCAmelCase_ :Optional[int] = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" ) lowerCAmelCase_ :Optional[Any] = partition_df.collect() lowerCAmelCase_ :Dict = 0 for row in rows: yield f"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class _SCREAMING_SNAKE_CASE ( _BaseExamplesIterable ): def __init__( self , __A , __A=None , ) -> Optional[Any]: lowerCAmelCase_ :List[str] = df lowerCAmelCase_ :str = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCAmelCase_ :int = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ) -> Tuple: yield from self.generate_examples_fn() def __lowerCAmelCase ( self , __A ) -> "SparkExamplesIterable": lowerCAmelCase_ :List[Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__A ) return SparkExamplesIterable(self.df , partition_order=__A ) def __lowerCAmelCase ( self , __A , __A ) -> "SparkExamplesIterable": lowerCAmelCase_ :Optional[Any] = self.split_shard_indices_by_worker(__A , __A ) return SparkExamplesIterable(self.df , partition_order=__A ) @property def __lowerCAmelCase ( self ) -> int: return len(self.partition_order ) class _SCREAMING_SNAKE_CASE ( datasets.DatasetBuilder ): UpperCAmelCase_ :Optional[Any] = SparkConfig def __init__( self , __A , __A = None , __A = None , **__A , ) -> int: import pyspark lowerCAmelCase_ :Tuple = pyspark.sql.SparkSession.builder.getOrCreate() lowerCAmelCase_ :Union[str, Any] = df lowerCAmelCase_ :Optional[Any] = working_dir super().__init__( cache_dir=__A , config_name=str(self.df.semanticHash() ) , **__A , ) def __lowerCAmelCase ( self ) -> int: # Returns the path of the created file. def create_cache_and_write_probe(__A ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=__A ) lowerCAmelCase_ :Union[str, Any] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(__A , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowerCAmelCase_ :int = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__A ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def __lowerCAmelCase ( self ) -> Optional[Any]: return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , __A ) -> Any: return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: import pyspark def get_arrow_batch_size(__A ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) lowerCAmelCase_ :Tuple = self.df.count() lowerCAmelCase_ :Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCAmelCase_ :Tuple = ( self.df.limit(__A ) .repartition(1 ) .mapInArrow(__A , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCAmelCase_ :List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCAmelCase_ :str = min(__A , int(approx_total_size / max_shard_size ) ) lowerCAmelCase_ :Optional[int] = self.df.repartition(__A ) def __lowerCAmelCase ( self , __A , __A , __A , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark lowerCAmelCase_ :Optional[int] = ParquetWriter if file_format == """parquet""" else ArrowWriter lowerCAmelCase_ :Dict = os.path.join(self._working_dir , os.path.basename(__A ) ) if self._working_dir else fpath lowerCAmelCase_ :Optional[Any] = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowerCAmelCase_ :List[str] = self.config.features lowerCAmelCase_ :List[Any] = self._writer_batch_size lowerCAmelCase_ :str = self._fs.storage_options def write_arrow(__A ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCAmelCase_ :Dict = pyspark.TaskContext().taskAttemptId() lowerCAmelCase_ :int = next(__A , __A ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) lowerCAmelCase_ :Tuple = 0 lowerCAmelCase_ :List[str] = writer_class( features=__A , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , ) lowerCAmelCase_ :int = pa.Table.from_batches([first_batch] ) writer.write_table(__A ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 lowerCAmelCase_ :int = writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , ) lowerCAmelCase_ :Any = pa.Table.from_batches([batch] ) writer.write_table(__A ) if writer._num_bytes > 0: lowerCAmelCase_ , lowerCAmelCase_ :Any = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__A ) ): lowerCAmelCase_ :Optional[int] = os.path.join(os.path.dirname(__A ) , os.path.basename(__A ) ) shutil.move(__A , __A ) lowerCAmelCase_ :Optional[int] = ( self.df.mapInArrow(__A , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __lowerCAmelCase ( self , __A , __A = "arrow" , __A = None , __A = None , **__A , ) -> Any: self._validate_cache_dir() lowerCAmelCase_ :Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__A ) lowerCAmelCase_ :Optional[Any] = not is_remote_filesystem(self._fs ) lowerCAmelCase_ :Tuple = os.path.join if is_local else posixpath.join lowerCAmelCase_ :List[Any] = """-TTTTT-SSSSS-of-NNNNN""" lowerCAmelCase_ :int = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" lowerCAmelCase_ :Optional[Any] = path_join(self._output_dir , __A ) lowerCAmelCase_ :Dict = 0 lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :str = 0 lowerCAmelCase_ :Union[str, Any] = [] lowerCAmelCase_ :List[str] = [] for task_id, content in self._prepare_split_single(__A , __A , __A ): ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) :List[Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__A ) lowerCAmelCase_ :Optional[int] = total_num_examples lowerCAmelCase_ :Tuple = total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: lowerCAmelCase_ :Any = all_shard_lengths # Define fs outside of 
_rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. lowerCAmelCase_ :List[str] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __A , __A , __A , ): rename( __A , fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , f"""{global_shard_id:05d}""" ).replace("""NNNNN""" , f"""{total_shards:05d}""" ) , ) lowerCAmelCase_ :Tuple = [] lowerCAmelCase_ :Tuple = 0 for i in range(len(__A ) ): lowerCAmelCase_ , lowerCAmelCase_ :Dict = task_id_and_num_shards[i] for shard_id in range(__A ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__A , len(__A ) ).map(lambda __A : _rename_shard(*__A ) ).collect() else: # don't use any pattern lowerCAmelCase_ :Optional[int] = 0 lowerCAmelCase_ :Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace(__A , """""" ) , ) def __lowerCAmelCase ( self , __A , ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df )
84
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
84
1
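A minimal standalone sketch (not part of the dataset rows) of the -TTTTT-SSSSS-of-NNNNN shard-naming scheme used by the Spark builder sample above. The template string and example IDs here are illustrative assumptions; only the placeholder tokens and :05d field widths come from the sample.

# Hypothetical walk-through of the shard-name substitution performed when
# per-task shard files are renamed to their final, globally numbered names.
template = "my_dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"  # assumed file-name template
task_id, shard_id, global_shard_id, total_shards = 3, 1, 7, 12

# Per-task name written during the job (NNNNN is still a literal placeholder).
src = template.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")
# Final name after the driver renames shards with global numbering.
dst = template.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")

print(src)  # my_dataset-train-00003-00001-of-NNNNN.arrow
print(dst)  # my_dataset-train-00007-of-00012.arrow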
"""simple docstring""" import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __UpperCAmelCase = logging.getLogger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = "sequence-classification" def __init__( self , __A ) -> Any: if type(__A ) == dict: lowerCAmelCase_ :Tuple = Namespace(**__A ) lowerCAmelCase_ :Any = glue_output_modes[hparams.task] lowerCAmelCase_ :str = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __lowerCAmelCase ( self , **__A ) -> Union[str, Any]: return self.model(**__A ) def __lowerCAmelCase ( self , __A , __A ) -> Union[str, Any]: lowerCAmelCase_ :str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase_ :Union[str, Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase_ :str = self(**__A ) lowerCAmelCase_ :Union[str, Any] = outputs[0] lowerCAmelCase_ :Dict = self.trainer.lr_schedulers[0]["""scheduler"""] lowerCAmelCase_ :Dict = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = self.hparams lowerCAmelCase_ :Union[str, Any] = processors[args.task]() lowerCAmelCase_ :str = processor.get_labels() for mode in ["train", "dev"]: lowerCAmelCase_ :List[str] = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __A ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) lowerCAmelCase_ :Optional[Any] = ( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) lowerCAmelCase_ :Optional[int] = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __A ) torch.save(__A , __A ) def __lowerCAmelCase ( self , __A , __A , __A = False ) -> DataLoader: lowerCAmelCase_ :Dict = """dev""" if mode == """test""" else mode lowerCAmelCase_ :Optional[Any] = self._feature_file(__A ) logger.info("""Loading features from cached file %s""" , __A ) lowerCAmelCase_ :Dict = torch.load(__A ) lowerCAmelCase_ :Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCAmelCase_ :int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCAmelCase_ :Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCAmelCase_ :Dict = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase_ :Tuple = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __lowerCAmelCase ( 
self , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase_ :Optional[int] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase_ :Union[str, Any] = self(**__A ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = outputs[:2] lowerCAmelCase_ :List[str] = logits.detach().cpu().numpy() lowerCAmelCase_ :Dict = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __lowerCAmelCase ( self , __A ) -> tuple: lowerCAmelCase_ :int = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() lowerCAmelCase_ :Optional[int] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCAmelCase_ :List[str] = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase_ :Tuple = np.squeeze(__A ) lowerCAmelCase_ :List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) lowerCAmelCase_ :List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCAmelCase_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] lowerCAmelCase_ :Optional[Any] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} lowerCAmelCase_ :Tuple = dict(results.items() ) lowerCAmelCase_ :Optional[Any] = results return ret, preds_list, out_label_list def __lowerCAmelCase ( self , __A ) -> dict: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[str] = self._eval_end(__A ) lowerCAmelCase_ :str = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __lowerCAmelCase ( self , __A ) -> dict: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :int = self._eval_end(__A ) lowerCAmelCase_ :List[Any] = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __lowerCAmelCase ( __A , __A ) -> int: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( """--max_seq_length""" , default=128 , type=__A , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__A , required=__A , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__A , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = argparse.ArgumentParser() add_generic_args(lowercase__ , os.getcwd() ) lowerCAmelCase_ :Any = GLUETransformer.add_model_specific_args(lowercase__ , os.getcwd() ) lowerCAmelCase_ :Union[str, Any] = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCAmelCase_ :Tuple = os.path.join( """./results""" , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) lowerCAmelCase_ :Optional[int] = GLUETransformer(lowercase__ ) lowerCAmelCase_ :Optional[int] = generic_train(lowercase__ , lowercase__ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCAmelCase_ :Optional[int] = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=lowercase__ ) ) lowerCAmelCase_ :Optional[int] = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(lowercase__ ) if __name__ == "__main__": main()
84
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , 
"""convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = 
argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
1
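A minimal sketch of the state-dict key-renaming pattern from the GLPN conversion sample above; only two of the many rewrite rules are shown, and the helper name is an illustrative assumption.

from collections import OrderedDict

def rename_keys_sketch(state_dict):
    # Walk the flat state dict and rewrite key prefixes with str.replace,
    # mirroring the rule structure of the converter above.
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if "patch_embed" in key:
            # e.g. patch_embed1 -> patch_embeddings.0 (1-based to 0-based index)
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        new_state_dict[key] = value
    return new_state_dict

print(rename_keys_sketch({"module.encoder.patch_embed1.proj.weight": 0}))
# OrderedDict([('glpn.encoder.patch_embeddings.0.proj.weight', 0)])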
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def _snake_case ( lowercase__ : Dict ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = {} lowerCAmelCase_ :Union[str, Any] = job["""started_at"""] lowerCAmelCase_ :Dict = job["""completed_at"""] lowerCAmelCase_ :Union[str, Any] = date_parser.parse(lowercase__ ) lowerCAmelCase_ :Any = date_parser.parse(lowercase__ ) lowerCAmelCase_ :List[str] = round((end_datetime - start_datetime).total_seconds() / 60.0 ) lowerCAmelCase_ :List[Any] = start lowerCAmelCase_ :Tuple = end lowerCAmelCase_ :List[str] = duration_in_min return job_info def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Dict=None ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :int = None if token is not None: lowerCAmelCase_ :Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""} lowerCAmelCase_ :int = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" lowerCAmelCase_ :Dict = requests.get(lowercase__ , headers=lowercase__ ).json() lowerCAmelCase_ :Union[str, Any] = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(lowercase__ ) for job in result["""jobs"""]} ) lowerCAmelCase_ :List[Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 ) for i in range(lowercase__ ): lowerCAmelCase_ :Tuple = requests.get(url + f"""&page={i + 2}""" , headers=lowercase__ ).json() job_time.update({job["""name"""]: extract_time_from_single_job(lowercase__ ) for job in result["""jobs"""]} ) return job_time except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = get_job_time(args.workflow_run_id) __UpperCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"""{k}: {v["duration"]}""")
84
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
1
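A small self-contained check of the job-duration arithmetic used in the workflow-timing sample above, assuming ISO-8601 timestamps like those returned by the GitHub Actions API; the timestamp values are illustrative.

import dateutil.parser as date_parser

start = date_parser.parse("2023-05-01T10:00:00Z")
end = date_parser.parse("2023-05-01T10:42:00Z")
# Same rounding as the sample: elapsed seconds converted to whole minutes.
duration_in_min = round((end - start).total_seconds() / 60.0)
print(duration_in_min)  # 42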
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A , __A , __A = None , ) -> Tuple: super().__init__() self.register_modules(transformer=__A , vae=__A , scheduler=__A ) # create a imagenet -> id dictionary for easier use lowerCAmelCase_ :str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): lowerCAmelCase_ :Union[str, Any] = int(__A ) lowerCAmelCase_ :str = dict(sorted(self.labels.items() ) ) def __lowerCAmelCase ( self , __A ) -> List[int]: if not isinstance(__A , __A ): lowerCAmelCase_ :int = list(__A ) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , __A , __A = 4.0 , __A = None , __A = 50 , __A = "pil" , __A = True , ) -> Union[ImagePipelineOutput, Tuple]: lowerCAmelCase_ :Tuple = len(__A ) lowerCAmelCase_ :Optional[int] = self.transformer.config.sample_size lowerCAmelCase_ :Dict = self.transformer.config.in_channels lowerCAmelCase_ :str = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__A , device=self.device , dtype=self.transformer.dtype , ) lowerCAmelCase_ :str = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents lowerCAmelCase_ :Tuple = torch.tensor(__A , device=self.device ).reshape(-1 ) lowerCAmelCase_ :Dict = torch.tensor([1000] * batch_size , device=self.device ) lowerCAmelCase_ :Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(__A ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: lowerCAmelCase_ :Tuple = latent_model_input[: len(__A ) // 2] lowerCAmelCase_ :Optional[Any] = torch.cat([half, half] , dim=0 ) lowerCAmelCase_ :Tuple = self.scheduler.scale_model_input(__A , __A ) lowerCAmelCase_ :List[str] = t if not torch.is_tensor(__A ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) lowerCAmelCase_ :Dict = latent_model_input.device.type == """mps""" if isinstance(__A , __A ): lowerCAmelCase_ :Tuple = torch.floataa if is_mps else torch.floataa else: lowerCAmelCase_ :Union[str, Any] = torch.intaa if is_mps else torch.intaa lowerCAmelCase_ :str = torch.tensor([timesteps] , dtype=__A , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: lowerCAmelCase_ :Optional[int] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCAmelCase_ :str = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output lowerCAmelCase_ :Optional[Any] = self.transformer( __A , timestep=__A , class_labels=__A ).sample # perform guidance if guidance_scale > 1: lowerCAmelCase_ , lowerCAmelCase_ :Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] lowerCAmelCase_ , lowerCAmelCase_ :str = torch.split(__A , len(__A ) // 2 , dim=0 ) lowerCAmelCase_ :Optional[int] = uncond_eps + guidance_scale * (cond_eps - uncond_eps) lowerCAmelCase_ :Tuple = torch.cat([half_eps, half_eps] , dim=0 ) lowerCAmelCase_ :int = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: lowerCAmelCase_ , lowerCAmelCase_ :int = torch.split(__A , __A , dim=1 ) else: lowerCAmelCase_ :List[str] = noise_pred # compute previous image: x_t -> x_t-1 lowerCAmelCase_ :Optional[Any] = self.scheduler.step(__A , __A , __A ).prev_sample if guidance_scale > 1: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = latent_model_input.chunk(2 , dim=0 ) else: lowerCAmelCase_ :List[str] = latent_model_input lowerCAmelCase_ :Tuple = 1 / self.vae.config.scaling_factor * latents lowerCAmelCase_ :Optional[Any] = self.vae.decode(__A ).sample lowerCAmelCase_ :Any = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowerCAmelCase_ :List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCAmelCase_ :Optional[int] = self.numpy_to_pil(__A ) if not return_dict: return (samples,) return ImagePipelineOutput(images=__A )
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
1
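A minimal numeric sketch of the classifier-free-guidance combine step from the DiT pipeline sample above; the tensor sizes and guidance scale are illustrative assumptions, while the split/blend/cat structure mirrors the sample.

import torch

noise_pred = torch.randn(4, 8, 2, 2)  # doubled batch: first half conditional, second half unconditional
latent_channels = 4
guidance_scale = 4.0

# Split off the learned-sigma channels, then blend cond/uncond noise estimates.
eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
eps = torch.cat([half_eps, half_eps], dim=0)
noise_pred = torch.cat([eps, rest], dim=1)
print(noise_pred.shape)  # torch.Size([4, 8, 2, 2])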
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :str = ort.SessionOptions() lowerCAmelCase_ :int = False return options def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) lowerCAmelCase_ :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) lowerCAmelCase_ :List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Dict = """A red cat sitting on a park bench""" lowerCAmelCase_ :int = np.random.RandomState(0 ) lowerCAmelCase_ :Dict = pipe( prompt=__A , image=__A , mask_image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :Tuple = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) lowerCAmelCase_ :int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) lowerCAmelCase_ :List[str] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" ) lowerCAmelCase_ :str = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = """A red cat sitting on a park bench""" lowerCAmelCase_ :str = np.random.RandomState(0 ) lowerCAmelCase_ :Dict = pipe( prompt=__A , image=__A , mask_image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Any = output.images lowerCAmelCase_ :Any = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[int] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 
0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
84
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
1
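A tiny self-contained sketch of the image-slice assertion pattern used throughout the ONNX inpainting tests above; the zero arrays stand in for real pipeline output and for the hard-coded reference values.

import numpy as np

images = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for pipeline output
image_slice = images[0, 255:258, 255:258, -1]           # 3x3 crop of the last channel
expected_slice = np.zeros(9, dtype=np.float32)          # reference values would be hard-coded here
assert images.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3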
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def _snake_case ( lowercase__ : str ) -> Optional[Any]: '''simple docstring''' def decorator(lowercase__ : Any ): lowerCAmelCase_ :Optional[Any] = getattr(lowercase__ , """handle_key""" , [] ) handle += [key] setattr(lowercase__ , """handle_key""" , lowercase__ ) return func return decorator def _snake_case ( *lowercase__ : List[str] ) -> List[str]: '''simple docstring''' def decorator(lowercase__ : List[str] ): lowerCAmelCase_ :List[str] = getattr(lowercase__ , """handle_key""" , [] ) handle += keys setattr(lowercase__ , """handle_key""" , lowercase__ ) return func return decorator class _SCREAMING_SNAKE_CASE ( A__ ): def __new__( cls , __A , __A , __A ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = super().__new__(cls , __A , __A , __A ) if not hasattr(__A , """key_handler""" ): setattr(__A , """key_handler""" , {} ) setattr(__A , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): lowerCAmelCase_ :str = getattr(__A , """handle_key""" , [] ) for key in handled_keys: lowerCAmelCase_ :str = value return new_cls @staticmethod def __lowerCAmelCase ( cls ) -> str: lowerCAmelCase_ :Optional[int] = get_character() if char != KEYMAP["undefined"]: lowerCAmelCase_ :Optional[Any] = ord(__A ) lowerCAmelCase_ :int = cls.key_handler.get(__A ) if handler: lowerCAmelCase_ :Any = char return handler(cls ) else: return None def _snake_case ( cls : Dict ) -> Tuple: '''simple docstring''' return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
84
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
1
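A quick numeric check of the Casimir relation implemented above, F = (ħ c π² A) / (240 d⁴), using the same constants as the sample; the plate area and separation are illustrative.

from math import pi

REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # J * s
SPEED_OF_LIGHT = 3e8                       # m / s
area, distance = 1e-4, 1e-6                # a 1 cm^2 plate pair, 1 micrometre apart
force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * distance**4)
print(f"{force:.3e} N")  # ~1.301e-07 N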
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Tuple = LEDTokenizer UpperCAmelCase_ :Tuple = LEDTokenizerFast UpperCAmelCase_ :Optional[Any] = True def __lowerCAmelCase ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase_ :List[Any] = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase_ :Optional[int] = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , **__A ) -> Dict: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , **__A ) -> Any: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self ) -> str: return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def __lowerCAmelCase ( self ) -> Tuple: return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCAmelCase_ :Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase_ :Dict = tokenizer(__A , max_length=len(__A ) , padding=__A , return_tensors="""pt""" ) self.assertIsInstance(__A , __A ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowerCAmelCase_ :Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__A , __A ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase_ :Optional[Any] = tokenizer(__A , padding=__A , return_tensors="""pt""" ) self.assertIn("""input_ids""" , __A ) self.assertIn("""attention_mask""" , __A ) self.assertNotIn("""labels""" , __A ) self.assertNotIn("""decoder_attention_mask""" , __A ) @require_torch def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Union[str, Any] = [ """Summary of the text.""", """Another summary.""", ] 
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase_ :Optional[int] = tokenizer(text_target=__A , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase_ :Union[str, Any] = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__A , truncation=__A , return_tensors="""pt""" ) self.assertIsInstance(__A , __A ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Any = ["""A long paragraph for summarization."""] lowerCAmelCase_ :Union[str, Any] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase_ :Any = tokenizer(__A , return_tensors="""pt""" ) lowerCAmelCase_ :Union[str, Any] = tokenizer(text_target=__A , return_tensors="""pt""" ) lowerCAmelCase_ :Any = inputs["""input_ids"""] lowerCAmelCase_ :Dict = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __lowerCAmelCase ( self ) -> str: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase_ :Optional[int] = ["""Summary of the text.""", """Another summary."""] lowerCAmelCase_ :List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowerCAmelCase_ :Optional[int] = tokenizer(__A , padding=__A ) lowerCAmelCase_ :Optional[Any] = [[0] * len(__A ) for x in encoded_output["""input_ids"""]] lowerCAmelCase_ :Dict = tokenizer.pad(__A ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , __A ) def __lowerCAmelCase ( self ) -> int: pass def __lowerCAmelCase ( self ) -> Optional[int]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase_ :int = self.rust_tokenizer_class.from_pretrained(__A , **__A ) lowerCAmelCase_ :Optional[Any] = self.tokenizer_class.from_pretrained(__A , **__A ) lowerCAmelCase_ :Tuple = """A, <mask> AllenNLP sentence.""" lowerCAmelCase_ :Optional[Any] = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A ) lowerCAmelCase_ :List[Any] = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) lowerCAmelCase_ :str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) lowerCAmelCase_ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( __A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( __A , ["""<s>""", """A""", 
""",""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
84
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
1
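A standalone restatement of the Hamming-distance helper above, with distinct parameter names so the comparison actually runs, plus a worked example.

def hamming_distance(a: str, b: str) -> int:
    # Count positions where equal-length strings differ; mismatched lengths raise.
    if len(a) != len(b):
        raise ValueError("String lengths must match!")
    return sum(ca != cb for ca, cb in zip(a, b))

print(hamming_distance("karolin", "kathrin"))  # 3 (r/t, o/h, l/r differ)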
"""simple docstring""" class _SCREAMING_SNAKE_CASE : # Public class to implement a graph def __init__( self , __A , __A , __A ) -> None: lowerCAmelCase_ :List[str] = row lowerCAmelCase_ :Tuple = col lowerCAmelCase_ :str = graph def __lowerCAmelCase ( self , __A , __A , __A ) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self , __A , __A , __A ) -> None: # Checking all 8 elements surrounding nth element lowerCAmelCase_ :int = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowerCAmelCase_ :Tuple = [-1, 0, 1, -1, 1, -1, 0, 1] lowerCAmelCase_ :int = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __A ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , __A ) def __lowerCAmelCase ( self ) -> int: # And finally, count all islands. lowerCAmelCase_ :int = [[False for j in range(self.COL )] for i in range(self.ROW )] lowerCAmelCase_ :Optional[int] = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(__A , __A , __A ) count += 1 return count
84
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
1
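A standalone restatement of the 8-connected island count implemented by the class above, with a small worked grid; the function and variable names are ours.

def count_islands(grid):
    # Depth-first flood fill over all 8 neighbours of each unvisited land cell.
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        seen[i][j] = True
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and not seen[ni][nj] and grid[ni][nj]:
                    dfs(ni, nj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not seen[i][j]:
                dfs(i, j)
                count += 1
    return count

grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
print(count_islands(grid))  # 5 islands under 8-connectivity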
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
84
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
1
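A minimal sketch of the point-table brightness shift from the sample above that avoids needing an image file on disk; the 2x2 grey test image and the chosen level are illustrative assumptions.

from PIL import Image

img = Image.new("RGB", (2, 2), color=(128, 128, 128))
level = 100
# Same per-channel mapping as the sample: shift every value by `level`.
bright = img.point(lambda c: 128 + level + (c - 128))
print(bright.getpixel((0, 0)))  # (228, 228, 228)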
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class lowercase_ ( lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = BartphoTokenizer __snake_case = False __snake_case = True def __lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" super().setUp() a = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) a = {'''unk_token''': '''<unk>'''} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""" ) a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : str , **__UpperCAmelCase : Any ) ->List[str]: """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __lowerCAmelCase ( self : str , __UpperCAmelCase : Union[str, Any] ) ->List[Any]: """simple docstring""" a = '''This is a là test''' a = '''This is a<unk><unk> test''' return input_text, output_text def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) a = '''This is a là test''' a = '''▁This ▁is ▁a ▁l à ▁t est'''.split() a = tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) a = tokens + [tokenizer.unk_token] a = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
0
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , 
beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with 
tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
0
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __A ( unittest.TestCase ): def _lowercase (self : Any ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def _lowercase (self : int ): UpperCAmelCase_ = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def _lowercase (self : str ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__a ) ) def _lowercase (self : Dict ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Optional[int] ): # pass variant but use the non-variant filenames UpperCAmelCase_ = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] UpperCAmelCase_ = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : str ): UpperCAmelCase_ = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Union[str, Any] ): # pass variant but use the non-variant 
filenames UpperCAmelCase_ = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : int ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
1
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
0
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
2
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
0
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in 
range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , 
n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx" def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :List[Any] = torch.manual_seed(__A ) lowerCAmelCase_ :Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] 
= OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Dict = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :List[str] = init_image.resize((128, 128) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( 
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :int = output.images lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
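# A minimal usage sketch (assumes `transformers` is installed and is run
# outside this package module). `attribute_map` lets generic code read
# `hidden_size` and `num_attention_heads` even though this config stores them
# as `d_model` and `decoder_attention_heads`.
if __name__ == "__main__":
    from transformers import Speech2Text2Config

    cfg = Speech2Text2Config(d_model=256, decoder_attention_heads=4)
    assert cfg.hidden_size == 256
    assert cfg.num_attention_heads == 4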
4
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , ) assert hasattr(self , """env""" ) def __lowerCAmelCase ( self , __A ) -> Any: # configuration for running training on smdistributed Model Parallel lowerCAmelCase_ :Union[str, Any] = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase_ :Tuple = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , ) def __lowerCAmelCase ( self , __A ) -> List[Any]: TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def __lowerCAmelCase ( self , __A ) -> List[str]: # create estimator lowerCAmelCase_ :Any = self.create_estimator(__A ) # run training estimator.fit() # result dataframe lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase_ :Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= 
self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
84
0
import copy import re class lowerCamelCase__ : SCREAMING_SNAKE_CASE__ = '''hp''' SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = None @classmethod def __A (cls , UpperCAmelCase , UpperCAmelCase ) -> Any: _lowercase =prefix _lowercase =defaults cls.build_naming_info() @staticmethod def __A (UpperCAmelCase , UpperCAmelCase ) -> List[str]: if len(UpperCAmelCase ) == 0: return "" _lowercase =None if any(char.isdigit() for char in word ): raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(UpperCAmelCase ) + 1 ): _lowercase =word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _lowercase =prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(UpperCAmelCase ): _lowercase ='''''' while integer != 0: _lowercase =chr(ord('''A''' ) + integer % 1_0 ) + s integer //= 1_0 return s _lowercase =0 while True: _lowercase =word + '''#''' + int_to_alphabetic(UpperCAmelCase ) if sword in info["reverse_short_word"]: continue else: _lowercase =sword break _lowercase =short_word _lowercase =word return short_word @staticmethod def __A (UpperCAmelCase , UpperCAmelCase ) -> Tuple: _lowercase =param_name.split('''_''' ) _lowercase =[TrialShortNamer.shortname_for_word(UpperCAmelCase , UpperCAmelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _lowercase =['''''', '''_'''] for separator in separators: _lowercase =separator.join(UpperCAmelCase ) if shortname not in info["reverse_short_param"]: _lowercase =shortname _lowercase =param_name return shortname return param_name @staticmethod def __A (UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: _lowercase =TrialShortNamer.shortname_for_key(UpperCAmelCase , UpperCAmelCase ) _lowercase =short_name _lowercase =param_name @classmethod def __A (cls ) -> Optional[Any]: if cls.NAMING_INFO is not None: return _lowercase ={ '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } _lowercase =list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(UpperCAmelCase , UpperCAmelCase ) _lowercase =info @classmethod def __A (cls , UpperCAmelCase ) -> Any: cls.build_naming_info() assert cls.PREFIX is not None _lowercase =[copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _lowercase =cls.NAMING_INFO['''short_param'''][k] if isinstance(UpperCAmelCase , UpperCAmelCase ): _lowercase =1 if v else 0 _lowercase ='''''' if isinstance(UpperCAmelCase , (int, float) ) else '''-''' _lowercase =f"{key}{sep}{v}" name.append(UpperCAmelCase ) return "_".join(UpperCAmelCase ) @classmethod def __A (cls , UpperCAmelCase ) -> Optional[int]: _lowercase =repr[len(cls.PREFIX ) + 1 :] if repr == "": _lowercase =[] else: _lowercase =repr.split('''_''' ) _lowercase ={} for value in values: if "-" in value: _lowercase , _lowercase =value.split('''-''' ) else: _lowercase =re.sub('''[0-9.]''' , '''''' , UpperCAmelCase ) _lowercase =float(re.sub('''[^0-9.]''' , '''''' , UpperCAmelCase ) ) _lowercase =cls.NAMING_INFO['''reverse_short_param'''][p_k] _lowercase =p_v for k in cls.DEFAULTS: if k not in parameters: _lowercase =cls.DEFAULTS[k] return parameters
5
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0 ) -> str: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError("""Invalid input""" ) lowerCAmelCase_ :List[str] = 1_0**n lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
84
0
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
6
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = 'src/transformers' __UpperCAmelCase = 'docs/source/en/tasks' def _snake_case ( lowercase__ : str , lowercase__ : List[str] , lowercase__ : Any ) -> str: '''simple docstring''' with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCAmelCase_ :List[Any] = f.readlines() # Find the start prompt. lowerCAmelCase_ :Tuple = 0 while not lines[start_index].startswith(lowercase__ ): start_index += 1 start_index += 1 lowerCAmelCase_ :Dict = start_index while not lines[end_index].startswith(lowercase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase_ :List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase__ , set() ) lowerCAmelCase_ :Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def _snake_case ( lowercase__ : int , lowercase__ : str=False ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = _find_text_in_file( filename=os.path.join(lowercase__ , lowercase__ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) lowerCAmelCase_ :int = get_model_list_for_task(lowercase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase__ , lowercase__ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" """ to fix this.""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
84
0
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A : """simple docstring""" def __init__( self : Any,lowercase_ : List[str],lowercase_ : Any=1_3,lowercase_ : Union[str, Any]=3_2,lowercase_ : Dict=2,lowercase_ : str=3,lowercase_ : List[str]=1_6,lowercase_ : Optional[Any]=[1, 2, 1],lowercase_ : List[str]=[2, 2, 4],lowercase_ : Optional[int]=2,lowercase_ : str=2.0,lowercase_ : Optional[int]=True,lowercase_ : Any=0.0,lowercase_ : Optional[int]=0.0,lowercase_ : Union[str, Any]=0.1,lowercase_ : int="gelu",lowercase_ : List[Any]=False,lowercase_ : Any=True,lowercase_ : Any=0.02,lowercase_ : str=1E-5,lowercase_ : Tuple=True,lowercase_ : Dict=None,lowercase_ : int=True,lowercase_ : Union[str, Any]=1_0,lowercase_ : Tuple=8,)-> List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = embed_dim A__ = depths A__ = num_heads A__ = window_size A__ = mlp_ratio A__ = qkv_bias A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = drop_path_rate A__ = hidden_act A__ = use_absolute_embeddings A__ = patch_norm A__ = layer_norm_eps A__ = initializer_range A__ = is_training A__ = scope A__ = use_labels A__ = type_sequence_label_size A__ = encoder_stride def snake_case__ ( self : Dict )-> Any: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' return SwinvaConfig( image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,) def snake_case__ ( self : str,lowercase_ : str,lowercase_ : Any,lowercase_ : int )-> Dict: '''simple docstring''' A__ = SwinvaModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_ ) A__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) A__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) ) def snake_case__ ( self : Tuple,lowercase_ : int,lowercase_ : Dict,lowercase_ : 
List[str] )-> Optional[int]:
        '''simple docstring'''
        A__ = SwinvaForMaskedImageModeling(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_ )
        self.parent.assertEqual(
            result.logits.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        A__ = 1
        A__ = SwinvaForMaskedImageModeling(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A__ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, 1, self.image_size, self.image_size) )

    def snake_case__ ( self : List[Any],lowercase_ : str,lowercase_ : Tuple,lowercase_ : Dict )-> Tuple:
        '''simple docstring'''
        A__ = self.type_sequence_label_size
        A__ = SwinvaForImageClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

    def snake_case__ ( self : int )-> str:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ = config_and_inputs
        A__ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    lowerCamelCase = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False

    def snake_case__ ( self : Tuple )-> Tuple:
        '''simple docstring'''
        A__ = SwinvaModelTester(self )
        A__ = ConfigTester(self,config_class=lowercase_,embed_dim=3_7 )

    def snake_case__ ( self : Dict )-> Tuple:
        '''simple docstring'''
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case__ ( self : int )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def snake_case__ ( self : int )-> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def snake_case__ ( self : Any )-> Dict:
        '''simple docstring'''
        pass

    def snake_case__ ( self : int )-> List[Any]:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_ )
            self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
            A__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_,nn.Linear ) )

    def snake_case__ ( self : Any )-> int:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_ )
            A__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]

            A__ = ['pixel_values']
            self.assertListEqual(arg_names[:1],lowercase_ )

    def snake_case__ ( self : List[str] )-> Tuple:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = True

        for model_class in self.all_model_classes:
            A__ = True
            A__ = False
            A__ = True
            A__ = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                A__ = model(**self._prepare_for_class(lowercase_,lowercase_ ) )
            A__ = outputs.attentions
            A__ = len(self.model_tester.depths )
            self.assertEqual(len(lowercase_ ),lowercase_ )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ = True
            A__ = config.window_size**2
            A__ = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                A__ = model(**self._prepare_for_class(lowercase_,lowercase_ ) )
            A__ = outputs.attentions
            self.assertEqual(len(lowercase_ ),lowercase_ )

            self.assertListEqual(
                list(attentions[0].shape[-3:] ),[self.model_tester.num_heads[0], window_size_squared, window_size_squared],)
            A__ = len(lowercase_ )

            # Check attention is always last and order is fine
            A__ = True
            A__ = True
            A__ = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                A__ = model(**self._prepare_for_class(lowercase_,lowercase_ ) )

            if hasattr(self.model_tester,'num_hidden_states_types' ):
                A__ = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                A__ = 2
            self.assertEqual(out_len + added_hidden_states,len(lowercase_ ) )

            A__ = outputs.attentions

            self.assertEqual(len(lowercase_ ),lowercase_ )

            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ),[self.model_tester.num_heads[0], window_size_squared, window_size_squared],)

    def snake_case__ ( self : Dict,lowercase_ : Tuple,lowercase_ : Optional[Any],lowercase_ : Optional[int],lowercase_ : Dict )-> List[Any]:
        '''simple docstring'''
        A__ = model_class(lowercase_ )
        model.to(lowercase_ )
        model.eval()

        with torch.no_grad():
            A__ = model(**self._prepare_for_class(lowercase_,lowercase_ ) )

        A__ = outputs.hidden_states

        A__ = getattr(
            self.model_tester,'expected_num_hidden_layers',len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowercase_ ),lowercase_ )

        # Swinv2 has a different seq_length
        A__ = (
            config.patch_size
            if isinstance(config.patch_size,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)

        A__ = outputs.reshaped_hidden_states
        self.assertEqual(len(lowercase_ ),lowercase_ )

        A__ , A__ , A__ , A__ = reshaped_hidden_states[0].shape
        A__ = (
            reshaped_hidden_states[0].view(lowercase_,lowercase_,height * width ).permute(0,2,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)

    def snake_case__ ( self : int )-> List[Any]:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            A__ = True
            self.check_hidden_states_output(lowercase_,lowercase_,lowercase_,lowercase_ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ = True

            self.check_hidden_states_output(lowercase_,lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : Union[str, Any] )-> Dict:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = 3
        A__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        A__ = (
            config.patch_size
            if isinstance(config.patch_size,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        A__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        A__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            A__ = True
            self.check_hidden_states_output(lowercase_,lowercase_,lowercase_,(padded_height, padded_width) )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ = True
            self.check_hidden_states_output(lowercase_,lowercase_,lowercase_,(padded_height, padded_width) )

    def snake_case__ ( self : Optional[Any] )-> int:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )

    def snake_case__ ( self : Optional[int] )-> Tuple:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    @slow
    def snake_case__ ( self : str )-> Any:
        '''simple docstring'''
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = SwinvaModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )

    def snake_case__ ( self : Union[str, Any] )-> Tuple:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = _config_zero_init(lowercase_ )
        for model_class in self.all_model_classes:
            A__ = model_class(config=lowercase_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(),[0.0, 1.0],msg=F'Parameter {name} of model {model_class} seems not properly initialized',)


@require_vision
@require_torch
class A ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def snake_case__ ( self : int )-> Dict:
        '''simple docstring'''
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )

    @slow
    def snake_case__ ( self : List[Any] )-> Any:
        '''simple docstring'''
        A__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            lowercase_ )
        A__ = self.default_image_processor

        A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        A__ = image_processor(images=lowercase_,return_tensors='pt' ).to(lowercase_ )

        # forward pass
        with torch.no_grad():
            A__ = model(**lowercase_ )

        # verify the logits
        A__ = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape,lowercase_ )
        A__ = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3],lowercase_,atol=1E-4 ) )
7
"""simple docstring""" def _snake_case ( lowercase__ : list[int] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] if len(lowercase__ ) == 1: return [nums.copy()] for _ in range(len(lowercase__ ) ): lowerCAmelCase_ :Optional[Any] = nums.pop(0 ) lowerCAmelCase_ :str = permute(lowercase__ ) for perm in permutations: perm.append(lowercase__ ) result.extend(lowercase__ ) nums.append(lowercase__ ) return result def _snake_case ( lowercase__ : Tuple ) -> List[str]: '''simple docstring''' def backtrack(lowercase__ : str ): if start == len(lowercase__ ) - 1: output.append(nums[:] ) else: for i in range(lowercase__ , len(lowercase__ ) ): lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] backtrack(start + 1 ) lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] # backtrack lowerCAmelCase_ :int = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCAmelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
84
0
MOD_ADLER = 6_55_21


def adler32(plain_text: str) -> int:
    # running sums of the Adler-32 checksum: `a` over bytes, `b` over the a-values
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
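# Usage sketch (hedged): `adler32` is a reconstructed name for the checksum
# function above. Its output can be cross-checked against zlib.adler32, which
# implements the same algorithm.
import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398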
8
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
84
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)

__lowerCAmelCase : Dict = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}


class _lowercase ( A__ , A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : List[str] = '''bit'''
    SCREAMING_SNAKE_CASE__ : Tuple = ['''preactivation''', '''bottleneck''']
    SCREAMING_SNAKE_CASE__ : List[str] = ['''SAME''', '''VALID''']

    def __init__(
        self :str ,
        lowerCAmelCase__ :Any=3 ,
        lowerCAmelCase__ :Optional[Any]=64 ,
        lowerCAmelCase__ :str=[256, 512, 1_024, 2_048] ,
        lowerCAmelCase__ :List[str]=[3, 4, 6, 3] ,
        lowerCAmelCase__ :str="preactivation" ,
        lowerCAmelCase__ :str="relu" ,
        lowerCAmelCase__ :Dict=None ,
        lowerCAmelCase__ :int=32 ,
        lowerCAmelCase__ :str=0.0 ,
        lowerCAmelCase__ :List[str]=False ,
        lowerCAmelCase__ :Union[str, Any]=32 ,
        lowerCAmelCase__ :List[Any]=1 ,
        lowerCAmelCase__ :Optional[Any]=None ,
        lowerCAmelCase__ :Optional[int]=None ,
        **lowerCAmelCase__ :Union[str, Any] ,
    ) -> Optional[Any]:
        super().__init__(**lowerCAmelCase__ )
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                __SCREAMING_SNAKE_CASE : Tuple = global_padding.upper()
            else:
                raise ValueError(f'''Padding strategy {global_padding} not supported''' )
        __SCREAMING_SNAKE_CASE : Optional[int] = num_channels
        __SCREAMING_SNAKE_CASE : str = embedding_size
        __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
        __SCREAMING_SNAKE_CASE : Union[str, Any] = depths
        __SCREAMING_SNAKE_CASE : Tuple = layer_type
        __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
        __SCREAMING_SNAKE_CASE : Optional[int] = global_padding
        __SCREAMING_SNAKE_CASE : Optional[int] = num_groups
        __SCREAMING_SNAKE_CASE : List[Any] = drop_path_rate
        __SCREAMING_SNAKE_CASE : Tuple = embedding_dynamic_padding
        __SCREAMING_SNAKE_CASE : Union[str, Any] = output_stride
        __SCREAMING_SNAKE_CASE : List[str] = width_factor

        __SCREAMING_SNAKE_CASE : List[Any] = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase__ ) + 1 )]
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = get_aligned_output_features_output_indices(
            out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
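# Hedged aside: the class above mirrors `BitConfig` from upstream
# `transformers` (the local name `_lowercase` and its argument names are
# obfuscated in this row, so this sketch uses the upstream class instead).
from transformers import BitConfig

config = BitConfig(layer_type="preactivation", global_padding="SAME")
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']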
9
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
84
0
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''

    lowercase_ = StableDiffusionXLImgaImgPipeline
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    lowercase_ = PipelineTesterMixin.required_optional_params - {"latents"}
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
        '''simple docstring'''
        torch.manual_seed(0)
        lowerCamelCase__: Tuple =UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=UpperCAmelCase_ ,
            addition_embed_type="text_time" ,
            addition_time_embed_dim=8 ,
            transformer_layers_per_block=(1, 2) ,
            projection_class_embeddings_input_dim=80 ,
            cross_attention_dim=64 ,
        )
        lowerCamelCase__: Tuple =EulerDiscreteScheduler(
            beta_start=0.0_0085 ,
            beta_end=0.012 ,
            steps_offset=1 ,
            beta_schedule="scaled_linear" ,
            timestep_spacing="leading" ,
        )
        torch.manual_seed(0)
        lowerCamelCase__: Tuple =AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0)
        lowerCamelCase__: Union[str, Any] =CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-0_5 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act="gelu" ,
            projection_dim=32 ,
        )
        lowerCamelCase__: Optional[Any] =CLIPTextModel(UpperCAmelCase_)
        lowerCamelCase__: List[str] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=UpperCAmelCase_)
        lowerCamelCase__: Union[str, Any] =CLIPTextModelWithProjection(UpperCAmelCase_)
        lowerCamelCase__: Optional[int] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=UpperCAmelCase_)
        lowerCamelCase__: int ={
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=0) ->List[Any]:
        '''simple docstring'''
        lowerCamelCase__: Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
        lowerCamelCase__: Any =image / 2 + 0.5
        if str(UpperCAmelCase_).startswith("mps"):
            lowerCamelCase__: str =torch.manual_seed(UpperCAmelCase_)
        else:
            lowerCamelCase__: List[str] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
        lowerCamelCase__: Any ={
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
        '''simple docstring'''
        lowerCamelCase__: str ="cpu"  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__: List[str] =self.get_dummy_components()
        lowerCamelCase__: Union[str, Any] =StableDiffusionXLImgaImgPipeline(**UpperCAmelCase_)
        lowerCamelCase__: Dict =sd_pipe.to(UpperCAmelCase_)
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)

        lowerCamelCase__: Dict =self.get_dummy_inputs(UpperCAmelCase_)
        lowerCamelCase__: Union[str, Any] =sd_pipe(**UpperCAmelCase_).images
        lowerCamelCase__: int =image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        lowerCamelCase__: List[Any] =np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)

    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict:
        '''simple docstring'''
        lowerCamelCase__: Optional[int] =self.get_dummy_components()
        lowerCamelCase__: Dict =StableDiffusionXLImgaImgPipeline(**UpperCAmelCase_)
        lowerCamelCase__: str =sd_pipe.to(UpperCAmelCase_)
        lowerCamelCase__: List[Any] =sd_pipe.to(UpperCAmelCase_)
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)

        # forward without prompt embeds
        lowerCamelCase__: int =self.get_dummy_inputs(UpperCAmelCase_)
        lowerCamelCase__: List[Any] =3 * ["this is a negative prompt"]
        lowerCamelCase__: Tuple =negative_prompt
        lowerCamelCase__: int =3 * [inputs["prompt"]]

        lowerCamelCase__: Tuple =sd_pipe(**UpperCAmelCase_)
        lowerCamelCase__: Tuple =output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        lowerCamelCase__: Union[str, Any] =self.get_dummy_inputs(UpperCAmelCase_)
        lowerCamelCase__: Dict =3 * ["this is a negative prompt"]
        lowerCamelCase__: Any =3 * [inputs.pop("prompt")]

        (
            (
                lowerCamelCase__
            ) ,
            (
                lowerCamelCase__
            ) ,
            (
                lowerCamelCase__
            ) ,
            (
                lowerCamelCase__
            ) ,
        ): Tuple =sd_pipe.encode_prompt(UpperCAmelCase_ , negative_prompt=UpperCAmelCase_)

        lowerCamelCase__: int =sd_pipe(
            **UpperCAmelCase_ ,
            prompt_embeds=UpperCAmelCase_ ,
            negative_prompt_embeds=UpperCAmelCase_ ,
            pooled_prompt_embeds=UpperCAmelCase_ ,
            negative_pooled_prompt_embeds=UpperCAmelCase_ ,
        )
        lowerCamelCase__: Optional[int] =output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4


@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]="cpu" , UpperCAmelCase_ : Optional[int]=torch.floataa , UpperCAmelCase_ : Any=0) ->Tuple:
        '''simple docstring'''
        lowerCamelCase__: Optional[int] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
        lowerCamelCase__: Union[str, Any] =np.random.RandomState(UpperCAmelCase_).standard_normal((1, 4, 64, 64))
        lowerCamelCase__: Union[str, Any] =torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_)
        lowerCamelCase__: Any ={
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
        '''simple docstring'''
        lowerCamelCase__: List[str] =DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(UpperCAmelCase_)
        pipe.set_progress_bar_config(disable=UpperCAmelCase_)

        lowerCamelCase__: int =self.get_inputs(UpperCAmelCase_)
        lowerCamelCase__: str =pipe(**UpperCAmelCase_).images
        lowerCamelCase__: Tuple =image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        lowerCamelCase__: int =np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506])
        assert np.abs(image_slice - expected_slice).max() < 7E-3
10
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
84
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class lowerCAmelCase__ ( a):
    '''simple docstring'''

    __SCREAMING_SNAKE_CASE = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    __SCREAMING_SNAKE_CASE = "CIDAS/clipseg-rd64-refined"
    __SCREAMING_SNAKE_CASE = "image_segmenter"
    __SCREAMING_SNAKE_CASE = CLIPSegForImageSegmentation

    __SCREAMING_SNAKE_CASE = ["image", "text"]
    __SCREAMING_SNAKE_CASE = ["image"]

    def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
        requires_backends(self , ["vision"])
        super().__init__(*__lowerCamelCase , **__lowerCamelCase)

    def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
        return self.pre_processor(text=[label] , images=[image] , padding=__lowerCamelCase , return_tensors="pt")

    def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[int]:
        with torch.no_grad():
            _A : Dict = self.model(**__lowerCamelCase).logits
        return logits

    def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
        _A : Any = outputs.cpu().detach().numpy()
        _A : int = 0
        _A : List[Any] = 1
        return Image.fromarray((array * 2_5_5).astype(np.uinta))
11
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: 
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / 
"""dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ 
:Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) 
return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) 
/ """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
0
from ...configuration_utils import PretrainedConfig


class lowerCamelCase__( __lowerCamelCase):
    UpperCAmelCase__ : Optional[int] = 'bert-generation'

    def __init__(
        self: Dict ,
        UpperCamelCase_: Optional[Any]=5_03_58 ,
        UpperCamelCase_: int=10_24 ,
        UpperCamelCase_: Union[str, Any]=24 ,
        UpperCamelCase_: List[Any]=16 ,
        UpperCamelCase_: Optional[int]=40_96 ,
        UpperCamelCase_: Any="gelu" ,
        UpperCamelCase_: Dict=0.1 ,
        UpperCamelCase_: Optional[Any]=0.1 ,
        UpperCamelCase_: List[Any]=5_12 ,
        UpperCamelCase_: Dict=0.02 ,
        UpperCamelCase_: int=1E-12 ,
        UpperCamelCase_: str=0 ,
        UpperCamelCase_: Optional[int]=2 ,
        UpperCamelCase_: List[str]=1 ,
        UpperCamelCase_: List[str]="absolute" ,
        UpperCamelCase_: str=True ,
        **UpperCamelCase_: int ,
    ):
        super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
        __lowerCamelCase = vocab_size
        __lowerCamelCase = hidden_size
        __lowerCamelCase = num_hidden_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = hidden_act
        __lowerCamelCase = intermediate_size
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = attention_probs_dropout_prob
        __lowerCamelCase = max_position_embeddings
        __lowerCamelCase = initializer_range
        __lowerCamelCase = layer_norm_eps
        __lowerCamelCase = position_embedding_type
        __lowerCamelCase = use_cache
12
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = "data2vec-text" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Dict = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :str = max_position_embeddings lowerCAmelCase_ :int = type_vocab_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :List[Any] = layer_norm_eps lowerCAmelCase_ :List[Any] = position_embedding_type lowerCAmelCase_ :List[Any] = use_cache lowerCAmelCase_ :List[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
84
0
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class __lowercase ( unittest.TestCase ):
    """simple docstring"""

    def __init__(
        self : str ,
        lowerCAmelCase__ : Dict ,
        lowerCAmelCase__ : Dict=13 ,
        lowerCAmelCase__ : int=7 ,
        lowerCAmelCase__ : List[str]=True ,
        lowerCAmelCase__ : Dict=True ,
        lowerCAmelCase__ : List[Any]=True ,
        lowerCAmelCase__ : Optional[Any]=True ,
        lowerCAmelCase__ : Dict=99 ,
        lowerCAmelCase__ : List[Any]=32 ,
        lowerCAmelCase__ : Dict=5 ,
        lowerCAmelCase__ : Union[str, Any]=4 ,
        lowerCAmelCase__ : List[str]=37 ,
        lowerCAmelCase__ : str="gelu" ,
        lowerCAmelCase__ : int=0.1 ,
        lowerCAmelCase__ : Union[str, Any]=0.1 ,
        lowerCAmelCase__ : Tuple=512 ,
        lowerCAmelCase__ : Dict=16 ,
        lowerCAmelCase__ : List[Any]=2 ,
        lowerCAmelCase__ : Dict=0.02 ,
        lowerCAmelCase__ : List[str]=4 ,
    ):
        SCREAMING_SNAKE_CASE_: Optional[int] = parent
        SCREAMING_SNAKE_CASE_: Union[str, Any] = batch_size
        SCREAMING_SNAKE_CASE_: Tuple = seq_length
        SCREAMING_SNAKE_CASE_: Optional[Any] = is_training
        SCREAMING_SNAKE_CASE_: int = use_attention_mask
        SCREAMING_SNAKE_CASE_: Optional[int] = use_token_type_ids
        SCREAMING_SNAKE_CASE_: Tuple = use_labels
        SCREAMING_SNAKE_CASE_: List[Any] = vocab_size
        SCREAMING_SNAKE_CASE_: Any = hidden_size
        SCREAMING_SNAKE_CASE_: str = num_hidden_layers
        SCREAMING_SNAKE_CASE_: Union[str, Any] = num_attention_heads
        SCREAMING_SNAKE_CASE_: List[str] = intermediate_size
        SCREAMING_SNAKE_CASE_: Dict = hidden_act
        SCREAMING_SNAKE_CASE_: Optional[int] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_: Any = max_position_embeddings
        SCREAMING_SNAKE_CASE_: str = type_vocab_size
        SCREAMING_SNAKE_CASE_: Optional[int] = type_sequence_label_size
        SCREAMING_SNAKE_CASE_: List[str] = initializer_range
        SCREAMING_SNAKE_CASE_: Dict = num_choices

    def _SCREAMING_SNAKE_CASE ( self : int):
        SCREAMING_SNAKE_CASE_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)

        SCREAMING_SNAKE_CASE_: Dict = None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE_: str = random_attention_mask([self.batch_size, self.seq_length])

        SCREAMING_SNAKE_CASE_: Tuple = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)

        SCREAMING_SNAKE_CASE_: int = RoFormerConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=lowerCAmelCase__ ,
            initializer_range=self.initializer_range ,
        )

        return config, input_ids, token_type_ids, attention_mask

    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = config_and_inputs
        SCREAMING_SNAKE_CASE_: Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """simple docstring"""

    _UpperCAmelCase : List[Any] = True

    _UpperCAmelCase : str = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        SCREAMING_SNAKE_CASE_: Tuple = FlaxRoFormerModelTester(self)

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE_: Optional[int] = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: Dict = model(np.ones((1, 1)))
            self.assertIsNotNone(lowerCAmelCase__)


@require_flax
class __lowercase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        SCREAMING_SNAKE_CASE_: List[str] = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")

        SCREAMING_SNAKE_CASE_: Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]])
        SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__)[0]
        SCREAMING_SNAKE_CASE_: str = 5_0000
        SCREAMING_SNAKE_CASE_: Optional[int] = (1, 6, vocab_size)
        self.assertEqual(output.shape , lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: int = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])

        self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4))
13
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
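# Hedged usage sketch for the converter above (not part of the original file;
# the checkpoint and config paths are illustrative placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/t5_config.json \
#       --pytorch_dump_path /path/to/pytorch_dump
#
# or, driven from Python:
#
#   convert_t5x_checkpoint_to_pytorch(
#       "/path/to/t5x/checkpoint_1000000", "/path/to/t5_config.json", "/path/to/pytorch_dump"
#   )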
84
0
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
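# Illustrative use of the scoring functions above (assumed sample data, not
# part of the original file). Weights: 0 = "lower is better", 1 = "higher is
# better"; each column is min-max normalized and the per-column scores summed.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    # columns: price (lower better), fuel economy (higher better), year (higher better)
    print(procentual_proximity(vehicles, [0, 1, 1]))
    # each row now ends with its combined score, e.g. 1.5 for the first row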
14
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , 
"""convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = 
argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
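# Quick illustration of the key-renaming scheme implemented by rename_keys
# above (assumed example key, not from the original script):
#
#   rename_keys({"module.encoder.block1.0.attn.q.weight": torch.zeros(1)})
#   # -> {"glpn.encoder.block.0.0.attention.self.query.weight": tensor([0.])}
#
# i.e. the "module." prefix is remapped, 1-based block indices become 0-based
# dotted indices, and timm-style attention names become HF-style ones.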
84
0
import unittest

from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 10_04)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_00)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2_85, 46, 10, 1_70, 3_82],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 1_85_36, 22_60, 1_01, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
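# The @slow-marked tests above only run when RUN_SLOW is set; a typical
# invocation (illustrative path into the transformers test suite) would be:
#
#   RUN_SLOW=1 pytest tests/models/big_bird/test_tokenization_big_bird.py -k integration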
15
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
0
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowerCAmelCase_ = get_logger(__name__) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> List[Any]: os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowercase__ : Optional[int] = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowercase__ : Optional[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" lowercase__ : Tuple = os.path.join(__lowerCamelCase , __lowerCamelCase ) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowercase__ : Optional[int] = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowercase__ : Union[str, Any] = os.path.join(__lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) logger.info(f"""Saving model to {ckpt_dir}""" ) lowercase__ : Any = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=__lowerCamelCase , storage_writer=dist_cp.FileSystemWriter(__lowerCamelCase ) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> Tuple: accelerator.wait_for_everyone() with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__lowerCamelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return lowercase__ : Optional[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Loading model from {input_model_file}""" ) lowercase__ : List[Any] = torch.load(__lowerCamelCase ) logger.info(f"""Model loaded from {input_model_file}""" ) elif 
fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowercase__ : List[Any] = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) lowercase__ : Dict = os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Loading model from {input_model_file}""" ) lowercase__ : List[str] = torch.load(__lowerCamelCase ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowercase__ : str = ( os.path.join(__lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" ) if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""" ) lowercase__ : Dict = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=__lowerCamelCase , storage_reader=dist_cp.FileSystemReader(__lowerCamelCase ) , planner=DefaultLoadPlanner() , ) lowercase__ : Optional[int] = state_dict['''model'''] logger.info(f"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(__lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> int: os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowercase__ : Tuple = FSDP.optim_state_dict(__lowerCamelCase , __lowerCamelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: lowercase__ : List[Any] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) lowercase__ : Dict = os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Optimizer state saved in {output_optimizer_file}""" ) else: lowercase__ : Any = os.path.join(__lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) logger.info(f"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(__lowerCamelCase ) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""" ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> Any: accelerator.wait_for_everyone() with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowercase__ : Optional[Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: lowercase__ : Optional[int] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" ) lowercase__ : Tuple = torch.load(__lowerCamelCase ) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" ) else: 
lowercase__ : Any = ( os.path.join(__lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""" ) lowercase__ : List[str] = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(__lowerCamelCase ) , ) lowercase__ : int = optim_state['''optimizer'''] logger.info(f"""Optimizer loaded from {ckpt_dir}""" ) lowercase__ : List[Any] = FSDP.optim_state_dict_to_load(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) optimizer.load_state_dict(__lowerCamelCase )
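# Hedged usage sketch (not part of the original module): with an Accelerate
# FSDP setup, a checkpoint round-trip with the helpers above looks roughly
# like this. `fsdp_plugin` comes from the Accelerator state; the "ckpt" path
# is a placeholder.
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()  # configured for FSDP via `accelerate config`
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")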
16
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
0
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights __lowercase = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__, cache_dir=UpperCAmelCase__ ) __lowercase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase__, os.listdir(UpperCAmelCase__ )[0], "snapshots" ) )] __lowercase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[Any] ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__ ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 4 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 6_4, 6_4, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3 assert np.abs(np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 49_947.875 ) < 5E-1 __lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(UpperCAmelCase__ ) == num_samples def _lowercase ( self : int ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=UpperCAmelCase__ ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], 
dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1 def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__ ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1 def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1 def _lowercase ( self : Optional[int] ): __lowercase = FlaxDDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=UpperCAmelCase__, steps_offset=1, ) __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, ) __lowercase = scheduler.create_state() __lowercase = scheduler_state __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 
5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1 def _lowercase ( self : Dict ): __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = jax.random.split(jax.random.PRNGKey(0 ), UpperCAmelCase__ ) __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, ) __lowercase = replicate(UpperCAmelCase__ ) __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) __lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1] # With memory efficient attention __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, use_memory_efficient_attention=UpperCAmelCase__, ) __lowercase = replicate(UpperCAmelCase__ ) __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) __lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
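# The replicate/shard/jit pattern used throughout these tests, in isolation
# (hedged sketch; `pipeline`, `params`, and `prompt_ids` come from the
# pipeline exactly as above):
#
#   params = replicate(params)                                     # copy params to every device
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = shard(prompt_ids)                                 # split the batch across devices
#   images = pipeline(prompt_ids, params, rng, jit=True).images    # pmapped call, one RNG per device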
17
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
0
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenizes `text` and/or preprocesses `images`, merging the results."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
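# Hedged usage sketch for the processor above (the checkpoint name and image
# path are illustrative):
#
#   from transformers import ChineseCLIPProcessor
#   from PIL import Image
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=Image.open("cat.jpg"), return_tensors="pt")
#   # inputs holds input_ids / attention_mask from the tokenizer plus pixel_values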
18
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
0
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
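# Sanity check of the helper above (verified by hand): an integer n passes
# check_partition_perfect when 4n + 1 is the square of one less than a power
# of two, since then sqrt(4n+1)/2 + 1/2 is itself a power of two:
#
#   print(check_partition_perfect(2))   # True  (4*2+1 = 9, sqrt = 3 = 2**2 - 1)
#   print(check_partition_perfect(3))   # False
#   print(check_partition_perfect(12))  # True  (4*12+1 = 49, sqrt = 7 = 2**3 - 1)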
19
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
0