code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
__magic_name__ = { "joule": 1.0, "kilojoule": 1_0_0_0, "megajoule": 1_0_0_0_0_0_0, "gigajoule": 1_0_0_0_0_0_0_0_0_0, "wattsecond": 1.0, "watthour": 3_6_0_0, "kilowatthour": 3_6_0_0_0_0_0, "newtonmeter": 1.0, "calorie_nutr": 4_1_8_6.8, "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0, "electronvolt": 1.602_176_634E-19, "britishthermalunit_it": 1_0_5_5.0_5_5_8_5, "footpound": 1.35_58_18, } def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: lowerCamelCase_ : List[Any] = ( F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" F"""Valid values are: {', '.join(lowerCAmelCase_)}""" ) raise ValueError(lowerCAmelCase_) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
73
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ): lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18} lowerCamelCase_ : str = parent lowerCamelCase_ : str = batch_size lowerCamelCase_ : Tuple = num_channels lowerCamelCase_ : Optional[int] = image_size lowerCamelCase_ : List[str] = min_resolution lowerCamelCase_ : Tuple = max_resolution lowerCamelCase_ : Tuple = do_resize lowerCamelCase_ : Dict = size lowerCamelCase_ : List[str] = apply_ocr def _UpperCamelCase ( self ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self ) @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , "do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "apply_ocr" ) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) 
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , a_ ) self.assertIsInstance(encoding.boxes , a_ ) # Test batched lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" 
).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # with apply_OCR = True lowerCamelCase_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" ) lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", 
"4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 
202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 
434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a_ ) self.assertListEqual(encoding.boxes , a_ ) # with apply_OCR = False lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ ) lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
73
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __magic_name__ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTMAEForPreTraining''', '''ViTMAELayer''', '''ViTMAEModel''', '''ViTMAEPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''TFViTMAEForPreTraining''', '''TFViTMAEModel''', '''TFViTMAEPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
73
from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : List[Any] = '''luke''' def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ): super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) lowerCamelCase_ : Tuple = vocab_size lowerCamelCase_ : Optional[int] = entity_vocab_size lowerCamelCase_ : Any = hidden_size lowerCamelCase_ : Dict = entity_emb_size lowerCamelCase_ : List[Any] = num_hidden_layers lowerCamelCase_ : int = num_attention_heads lowerCamelCase_ : Union[str, Any] = hidden_act lowerCamelCase_ : Tuple = intermediate_size lowerCamelCase_ : Optional[Any] = hidden_dropout_prob lowerCamelCase_ : Any = attention_probs_dropout_prob lowerCamelCase_ : Optional[Any] = max_position_embeddings lowerCamelCase_ : str = type_vocab_size lowerCamelCase_ : int = initializer_range lowerCamelCase_ : List[Any] = layer_norm_eps lowerCamelCase_ : Optional[int] = use_entity_aware_attention lowerCamelCase_ : str = classifier_dropout
73
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __magic_name__ ( ): '''simple docstring''' lowerCamelCase_ : List[str] = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase_) lowerCamelCase_ : Optional[Any] = parser.add_subparsers(help="accelerate command helpers") # Register commands get_config_parser(subparsers=lowerCAmelCase_) env_command_parser(subparsers=lowerCAmelCase_) launch_command_parser(subparsers=lowerCAmelCase_) tpu_command_parser(subparsers=lowerCAmelCase_) test_command_parser(subparsers=lowerCAmelCase_) # Let's go lowerCamelCase_ : Any = parser.parse_args() if not hasattr(lowerCAmelCase_ , "func"): parser.print_help() exit(1) # Run args.func(lowerCAmelCase_) if __name__ == "__main__": main()
73
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __magic_name__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : Optional[datasets.Features] = None def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , ): '''simple docstring''' import pyspark def generate_fn(): lowerCamelCase_ : Dict = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id")) for partition_id in partition_order: lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id") lowerCamelCase_ : Dict = partition_df.collect() lowerCamelCase_ : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): """simple docstring""" def __init__( self , a_ , a_=None , ): lowerCamelCase_ : Dict = df lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(a_ ) return SparkExamplesIterable(self.df , partition_order=a_ ) def _UpperCamelCase ( self , a_ , a_ ): lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ ) return SparkExamplesIterable(self.df , 
partition_order=a_ ) @property def _UpperCamelCase ( self ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): """simple docstring""" __UpperCAmelCase : Any = SparkConfig def __init__( self , a_ , a_ = None , a_ = None , **a_ , ): import pyspark lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate() lowerCamelCase_ : Optional[Any] = df lowerCamelCase_ : List[Any] = working_dir super().__init__( cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , ) def _UpperCamelCase ( self ): # Returns the path of the created file. def create_cache_and_write_probe(a_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=a_ ) lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(a_ , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowerCamelCase_ : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _UpperCamelCase ( self ): return datasets.DatasetInfo(features=self.config.features ) def _UpperCamelCase ( self , a_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _UpperCamelCase ( self , a_ ): import pyspark def get_arrow_batch_size(a_ ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) lowerCamelCase_ : str = self.df.count() lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCamelCase_ : Any = ( self.df.limit(a_ ) .repartition(1 ) .mapInArrow(a_ , "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) ) lowerCamelCase_ : int = self.df.repartition(a_ ) def _UpperCamelCase ( self , a_ , a_ , a_ , ): import pyspark lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath lowerCamelCase_ : Optional[Any] = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
lowerCamelCase_ : int = self.config.features lowerCamelCase_ : Any = self._writer_batch_size lowerCamelCase_ : Tuple = self._fs.storage_options def write_arrow(a_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId() lowerCamelCase_ : Optional[int] = next(a_ , a_ ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , ) lowerCamelCase_ : List[Any] = 0 lowerCamelCase_ : Optional[int] = writer_class( features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , ) lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(a_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) shard_id += 1 lowerCamelCase_ : List[str] = writer_class( features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , ) lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] ) writer.write_table(a_ ) if writer._num_bytes > 0: lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(a_ ) ): lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) ) shutil.move(a_ , a_ ) lowerCamelCase_ : int = ( 
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ): self._validate_cache_dir() lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(a_ ) lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs ) lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN" lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" lowerCamelCase_ : int = path_join(self._output_dir , a_ ) lowerCamelCase_ : int = 0 lowerCamelCase_ : Optional[Any] = 0 lowerCamelCase_ : int = 0 lowerCamelCase_ : Dict = [] lowerCamelCase_ : Any = [] for task_id, content in self._prepare_split_single(a_ , a_ , a_ ): ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(a_ ) lowerCamelCase_ : Dict = total_num_examples lowerCamelCase_ : Any = total_num_bytes # should rename everything at the end logger.debug(F"""Renaming {total_shards} shards.""" ) if total_shards > 1: lowerCamelCase_ : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling 
the SparkContext. lowerCamelCase_ : Any = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( a_ , a_ , a_ , ): rename( a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , ) lowerCamelCase_ : Optional[int] = [] lowerCamelCase_ : Dict = 0 for i in range(len(a_ ) ): lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i] for shard_id in range(a_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect() else: # don't use any pattern lowerCamelCase_ : int = 0 lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , ) def _UpperCamelCase ( self , a_ , ): return SparkExamplesIterable(self.df )
73
1
import itertools import math def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase_) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True def __magic_name__ ( ): '''simple docstring''' lowerCamelCase_ : str = 2 while True: if is_prime(lowerCAmelCase_): yield num num += 1 def __magic_name__ ( lowerCAmelCase_ = 1_0001): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , lowerCAmelCase_)) if __name__ == "__main__": print(f'''{solution() = }''')
73
from queue import PriorityQueue from typing import Any import numpy as np def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ): '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCamelCase_ : List[str] = cst_fwd.get(lowerCAmelCase_ , np.inf) lowerCamelCase_ : Dict = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt)) lowerCamelCase_ : Optional[int] = new_cost_f lowerCamelCase_ : List[str] = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCamelCase_ : Tuple = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Optional[Any] = -1 lowerCamelCase_ : Tuple = set() lowerCamelCase_ : Dict = set() lowerCamelCase_ : int = {source: 0} lowerCamelCase_ : str = {destination: 0} lowerCamelCase_ : Tuple = {source: None} lowerCamelCase_ : Dict = {destination: None} lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue() lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue() lowerCamelCase_ : List[str] = np.inf queue_forward.put((0, source)) queue_backward.put((0, destination)) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCamelCase_ ,lowerCamelCase_ : List[Any] = queue_forward.get() visited_forward.add(lowerCAmelCase_) lowerCamelCase_ ,lowerCamelCase_ : str = queue_backward.get() visited_backward.add(lowerCAmelCase_) lowerCamelCase_ : Any = pass_and_relaxation( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) lowerCamelCase_ : Dict = pass_and_relaxation( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCamelCase_ : Union[str, Any] = shortest_distance return shortest_path_distance __magic_name__ = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } __magic_name__ = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
73
1
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is truthy, otherwise ``max_val``.

    Used as a clamped fallback bound; validates types and bound ordering.
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Integer midpoint of two numbers (mean truncated toward zero)."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find ``to_guess`` by repeated bisection of [lower, higher].

    Prints every intermediate guess and the final answer; returns nothing.

    Raises:
        ValueError: if ``lower > higher`` or ``to_guess`` is not strictly
            inside the open interval (lower, higher).
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        # Oracle: compare a candidate against the hidden value.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Interactive entry point: read bounds and the hidden value from stdin."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration class for the CTRL model.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the canonical `ctrl` checkpoint. Extra keyword arguments are forwarded to
    ``PretrainedConfig``.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic PretrainedConfig attribute names onto CTRL's own names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        # Persist every hyper-parameter on the instance so it is serialized
        # by PretrainedConfig.to_dict()/save_pretrained().
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
73
1
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subsequence of ``nums`` whose elements sum to ``max_sum``.

    Subsets are produced in depth-first (index) order; elements keep their
    original relative order inside each subset.
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS over the subset state-space tree, collecting exact-sum paths.

    Prunes a branch when the running sum overshoots ``max_sum`` or when even
    taking all remaining numbers cannot reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],  # extend without mutating the shared path
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
73
"""Fine-tune masked-language models with whole-word masking (run_mlm_wwm)."""

import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization."
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when training from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we train and evaluate on."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Validate data-file extensions early so failures are immediate.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    """Attach whole-word boundary references (one JSON line per example) to `dataset`."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    """Parse arguments, load data/model/tokenizer, then train and/or evaluate."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a hub dataset or local CSV/JSON/TXT files.
    # In distributed training, load_dataset guarantees only one local process
    # downloads the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # Carve a validation split out of the head of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)

    # Load pretrained model and tokenizer. The .from_pretrained methods
    # guarantee only one local process concurrently downloads model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets. First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator: takes care of randomly masking whole words.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
73
1
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    """Configuration class for GPT-J models.

    Defaults match the `EleutherAI/gpt-j-6B` checkpoint; extra keyword
    arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "gptj"
    # Map the generic PretrainedConfig attribute names onto GPT-J's own names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Persist every hyper-parameter on the instance so it is serialized
        # by PretrainedConfig.to_dict()/save_pretrained().
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J, with optional past-key-values support."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the attention mask to cover the dummy past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
73
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Mutable (via `.replace`) scheduler state carried between calls."""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Scheduler step output: previous sample, derivative, updated state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax stochastic sampler in the style of Karras et al. (2022).

    Configured via ``register_to_config``; all per-run data lives in an
    immutable ``KarrasVeSchedulerState`` that each method returns updated.
    """

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All parameters are captured by @register_to_config into self.config.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Return a new state with the discrete timesteps and sigma schedule set."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        """Explicit Langevin-like "churn": raise sigma by gamma and add noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Propagate the sample one step (first-order Euler prediction)."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order (Heun) correction of the network's prediction."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
73
1
# NOTE(review): machine-obfuscated TF-OPT test module. All classes share the
# placeholder name `lowerCAmelCase__`, so later definitions shadow earlier
# ones; duplicate `a_` parameters in several signatures are SyntaxErrors as
# written; bodies reference names the obfuscation detached from parameters.
# Tokens are preserved verbatim; only formatting/comments were restored.
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel


def __magic_name__(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=None, lowerCAmelCase_=None):
    '''Build the model input dict, deriving an attention mask from pad tokens
    when none is supplied (originally `prepare_opt_inputs_dict`).'''
    if attention_mask is None:
        lowerCamelCase_ : int = tf.cast(tf.math.not_equal(lowerCAmelCase_, config.pad_token_id), tf.inta)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class lowerCAmelCase__:
    """Model tester: holds hyperparameters and builds tiny OPT configs/inputs."""

    __UpperCAmelCase : Optional[int] = OPTConfig
    __UpperCAmelCase : Optional[int] = {}
    __UpperCAmelCase : Any = '''gelu'''

    def __init__(self, a_, a_=13, a_=7, a_=True, a_=False, a_=99, a_=16, a_=2, a_=4, a_=4, a_="gelu", a_=0.1, a_=0.1, a_=20, a_=2, a_=1, a_=0, a_=16, a_=16,):
        # NOTE(review): duplicate `a_` parameters — obfuscation artifact.
        lowerCamelCase_ : List[str] = parent
        lowerCamelCase_ : Union[str, Any] = batch_size
        lowerCamelCase_ : str = seq_length
        lowerCamelCase_ : str = is_training
        lowerCamelCase_ : int = use_labels
        lowerCamelCase_ : Dict = vocab_size
        lowerCamelCase_ : str = hidden_size
        lowerCamelCase_ : List[str] = num_hidden_layers
        lowerCamelCase_ : str = num_attention_heads
        lowerCamelCase_ : List[Any] = intermediate_size
        lowerCamelCase_ : Optional[Any] = hidden_act
        lowerCamelCase_ : Tuple = hidden_dropout_prob
        lowerCamelCase_ : int = attention_probs_dropout_prob
        lowerCamelCase_ : int = max_position_embeddings
        lowerCamelCase_ : Tuple = eos_token_id
        lowerCamelCase_ : Union[str, Any] = pad_token_id
        lowerCamelCase_ : Union[str, Any] = bos_token_id
        lowerCamelCase_ : Optional[int] = embed_dim
        lowerCamelCase_ : Dict = word_embed_proj_dim
        lowerCamelCase_ : List[str] = False

    def _UpperCamelCase(self):
        # Random ids with a forced EOS column, then a tiny decoder-only config.
        lowerCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        lowerCamelCase_ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        lowerCamelCase_ : List[str] = tf.concat([input_ids, eos_tensor], axis=1)
        lowerCamelCase_ : Optional[Any] = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=a_,
            **self.config_updates,
        )
        lowerCamelCase_ : Union[str, Any] = prepare_opt_inputs_dict(a_, a_)
        return config, inputs_dict

    def _UpperCamelCase(self, a_, a_):
        # Check that running with a past-key-values cache gives the same
        # logits (for the new tokens) as a full forward pass.
        lowerCamelCase_ : Dict = TFOPTModel(config=a_)
        lowerCamelCase_ : int = inputs_dict["input_ids"]
        lowerCamelCase_ : List[Any] = input_ids[:1, :]
        lowerCamelCase_ : Optional[Any] = inputs_dict["attention_mask"][:1, :]
        lowerCamelCase_ : Optional[Any] = 1
        # first forward pass
        lowerCamelCase_ : Tuple = model(a_, attention_mask=a_, use_cache=a_)
        lowerCamelCase_ ,lowerCamelCase_ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase_ : Union[str, Any] = ids_tensor((self.batch_size, 3), config.vocab_size)
        lowerCamelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.inta)
        # append to next input_ids and
        lowerCamelCase_ : Dict = tf.concat([input_ids, next_tokens], axis=-1)
        lowerCamelCase_ : Any = tf.concat([attention_mask, next_attn_mask], axis=-1)
        lowerCamelCase_ : Optional[int] = model(a_, attention_mask=a_)[0]
        lowerCamelCase_ : int = model(a_, attention_mask=a_, past_key_values=a_)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        lowerCamelCase_ : Union[str, Any] = int(ids_tensor((1,), output_from_past.shape[-1]))
        lowerCamelCase_ : Dict = output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase_ : List[Any] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(a_, a_, rtol=1E-3)


@require_tf
class lowerCAmelCase__(__lowerCamelCase, __lowerCamelCase, unittest.TestCase):
    """Common model-tester harness for TFOPTModel / TFOPTForCausalLM."""

    __UpperCAmelCase : Tuple = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    __UpperCAmelCase : Any = (TFOPTForCausalLM,) if is_tf_available() else ()
    __UpperCAmelCase : str = (
        {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
    )
    __UpperCAmelCase : str = False
    __UpperCAmelCase : int = False
    __UpperCAmelCase : str = False
    __UpperCAmelCase : Optional[int] = 10

    def _UpperCamelCase(self):
        lowerCamelCase_ : Optional[Any] = TFOPTModelTester(self)
        lowerCamelCase_ : List[str] = ConfigTester(self, config_class=a_)

    def _UpperCamelCase(self):
        self.config_tester.run_common_tests()

    def _UpperCamelCase(self):
        lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*a_)

    def _UpperCamelCase(self):
        # Resize token embeddings up and down and verify the shared weights
        # keep their values and new shapes match.
        lowerCamelCase_ ,lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(a_, a_):
            if hasattr(a_, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(a_, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                lowerCamelCase_ : Tuple = model_class(config=a_)
                lowerCamelCase_ : Optional[Any] = _get_word_embedding_weight(a_, model.get_input_embeddings())
                lowerCamelCase_ : str = _get_word_embedding_weight(a_, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(a_)
                lowerCamelCase_ : Tuple = _get_word_embedding_weight(a_, model.get_input_embeddings())
                lowerCamelCase_ : int = _get_word_embedding_weight(a_, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                lowerCamelCase_ : Union[str, Any] = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], a_)
                # check that weights remain the same after resizing
                lowerCamelCase_ : Tuple = True
                for pa, pa in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
                        lowerCamelCase_ : List[Any] = False
                self.assertTrue(a_)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], a_)
                    lowerCamelCase_ : Optional[Any] = True
                    for pa, pa in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
                            lowerCamelCase_ : List[str] = False
                    self.assertTrue(a_)


def __magic_name__(lowerCAmelCase_):
    '''Wrap a nested Python list of token ids as an integer tf constant.'''
    return tf.constant(lowerCAmelCase_, dtype=tf.intaa)


@require_tf
class lowerCAmelCase__(unittest.TestCase):
    """Builds a tiny OPT config plus random inputs ending in EOS."""

    __UpperCAmelCase : int = 99

    def _UpperCamelCase(self):
        lowerCamelCase_ : Tuple = tf.ones((4, 1), dtype=tf.intaa) * 2
        lowerCamelCase_ : Dict = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        lowerCamelCase_ : str = input_ids.shape[0]
        lowerCamelCase_ : List[Any] = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class lowerCAmelCase__(unittest.TestCase):
    """Integration test: facebook/opt-350m hidden states vs. recorded values."""

    @slow
    def _UpperCamelCase(self):
        lowerCamelCase_ : List[str] = TFOPTModel.from_pretrained("facebook/opt-350m")
        lowerCamelCase_ : Dict = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
        lowerCamelCase_ : str = tf.not_equal(a_, model.config.pad_token_id)
        with tf.GradientTape():
            lowerCamelCase_ : List[Any] = model(input_ids=a_, attention_mask=a_).last_hidden_state
        lowerCamelCase_ : str = (1, 11, 512)
        self.assertEqual(output.shape, a_)
        # Reference slice recorded from a known-good run.
        lowerCamelCase_ : Optional[int] = tf.constant(
            [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], a_, atol=4E-3))
        # Same check under XLA compilation (looser tolerance).
        lowerCamelCase_ : Optional[int] = tf.function(a_, jit_compile=a_)
        lowerCamelCase_ : int = xla_generate(a_, a_)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], a_, atol=4E-2))


@require_tf
@slow
class lowerCAmelCase__(unittest.TestCase):
    """Logit-parity test against Metaseq reference values for opt-350m."""

    def _UpperCamelCase(self):
        super().setUp()
        lowerCamelCase_ : Tuple = "facebook/opt-350m"

    def _UpperCamelCase(self):
        lowerCamelCase_ : Any = TFOPTForCausalLM.from_pretrained(self.path_model)
        lowerCamelCase_ : Any = GPTaTokenizer.from_pretrained(self.path_model)
        lowerCamelCase_ : List[str] = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        lowerCamelCase_ : Optional[int] = tokenizer(a_, return_tensors="tf", padding=a_, add_special_tokens=a_)
        lowerCamelCase_ : Dict = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        lowerCamelCase_ : Dict = tf.constant(
            [
                [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
                [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
                [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
                [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
            ]
        )
        self.assertTrue(np.allclose(a_, a_, atol=1E-4))
        lowerCamelCase_ : Optional[int] = tf.function(a_, jit_compile=a_)
        lowerCamelCase_ : Optional[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(a_, a_, atol=1E-4))


@require_tf
@slow
class lowerCAmelCase__(unittest.TestCase):
    """Generation integration tests (greedy continuation, batched padding)."""

    @property
    def _UpperCamelCase(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def _UpperCamelCase(self):
        # opt-125m: generate 10-token continuations and compare to references.
        lowerCamelCase_ : str = "facebook/opt-125m"
        lowerCamelCase_ : Dict = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        lowerCamelCase_ : List[str] = []
        lowerCamelCase_ : int = GPTaTokenizer.from_pretrained(a_)
        lowerCamelCase_ : Dict = TFOPTForCausalLM.from_pretrained(a_)
        for prompt in self.prompts:
            lowerCamelCase_ : str = tokenizer(a_, return_tensors="tf").input_ids
            lowerCamelCase_ : Any = model.generate(a_, max_length=10)
            lowerCamelCase_ : str = tokenizer.batch_decode(a_, skip_special_tokens=a_)
            predicted_outputs += generated_string
        self.assertListEqual(a_, a_)

    def _UpperCamelCase(self):
        # Batched generation with left padding must match per-sentence generation.
        lowerCamelCase_ : List[str] = "facebook/opt-350m"
        lowerCamelCase_ : Any = GPTaTokenizer.from_pretrained(a_)
        lowerCamelCase_ : List[str] = TFOPTForCausalLM.from_pretrained(a_)
        lowerCamelCase_ : Dict = "left"
        # use different length sentences to test batching
        lowerCamelCase_ : Tuple = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        lowerCamelCase_ : List[Any] = tokenizer(a_, return_tensors="tf", padding=a_)
        lowerCamelCase_ : Union[str, Any] = inputs["input_ids"]
        lowerCamelCase_ : str = model.generate(input_ids=a_, attention_mask=inputs["attention_mask"])
        lowerCamelCase_ : Tuple = tokenizer(sentences[0], return_tensors="tf").input_ids
        lowerCamelCase_ : Optional[int] = model.generate(input_ids=a_)
        lowerCamelCase_ : Optional[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.intaa)
        )
        lowerCamelCase_ : List[str] = tokenizer(sentences[1], return_tensors="tf").input_ids
        lowerCamelCase_ : List[str] = model.generate(input_ids=a_, max_length=model.config.max_length - num_paddings)
        lowerCamelCase_ : List[Any] = tokenizer.batch_decode(a_, skip_special_tokens=a_)
        lowerCamelCase_ : Dict = tokenizer.decode(output_non_padded[0], skip_special_tokens=a_)
        lowerCamelCase_ : List[Any] = tokenizer.decode(output_padded[0], skip_special_tokens=a_)
        lowerCamelCase_ : Union[str, Any] = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(a_, a_)
        self.assertListEqual(a_, [non_padded_sentence, padded_sentence])

    def _UpperCamelCase(self):
        # opt-350m: same continuation test with its own reference outputs.
        lowerCamelCase_ : List[str] = "facebook/opt-350m"
        lowerCamelCase_ : Dict = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        lowerCamelCase_ : Dict = []
        lowerCamelCase_ : str = GPTaTokenizer.from_pretrained(a_)
        lowerCamelCase_ : List[Any] = TFOPTForCausalLM.from_pretrained(a_)
        for prompt in self.prompts:
            lowerCamelCase_ : List[Any] = tokenizer(a_, return_tensors="tf").input_ids
            lowerCamelCase_ : Optional[Any] = model.generate(a_, max_length=10)
            lowerCamelCase_ : Optional[Any] = tokenizer.batch_decode(a_, skip_special_tokens=a_)
            predicted_outputs += generated_string
        self.assertListEqual(a_, a_)
73
# NOTE(review): machine-obfuscated StableDiffusion DiffEdit pipeline test
# module. Class/variable names were collapsed by obfuscation (all classes are
# `lowerCAmelCase__`; later definitions shadow earlier ones) and many bodies
# reference names detached from their parameters. Tokens preserved verbatim;
# only line structure and comments restored.
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image

from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class lowerCAmelCase__(__lowerCamelCase, __lowerCamelCase, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for StableDiffusionDiffEditPipeline."""

    __UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
    __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    __UpperCAmelCase : List[Any] = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __UpperCAmelCase : List[str] = frozenset([])

    def _UpperCamelCase(self):
        # Build tiny deterministic components (unet, schedulers, vae, clip).
        torch.manual_seed(0)
        lowerCamelCase_ : str = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=a_,
        )
        lowerCamelCase_ : str = DDIMScheduler(
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            beta_schedule="scaled_linear",
            clip_sample=a_,
            set_alpha_to_one=a_,
        )
        lowerCamelCase_ : Dict = DDIMInverseScheduler(
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            beta_schedule="scaled_linear",
            clip_sample=a_,
            set_alpha_to_zero=a_,
        )
        torch.manual_seed(0)
        lowerCamelCase_ : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_)
        lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        lowerCamelCase_ : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def _UpperCamelCase(self, a_, a_=0):
        # Inputs for the main inpaint call: random mask + latents.
        lowerCamelCase_ : str = floats_tensor((1, 16, 16), rng=random.Random(a_)).to(a_)
        lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(a_)).to(a_)
        if str(a_).startswith("mps"):
            lowerCamelCase_ : List[Any] = torch.manual_seed(a_)
        else:
            lowerCamelCase_ : List[str] = torch.Generator(device=a_).manual_seed(a_)
        lowerCamelCase_ : Tuple = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase(self, a_, a_=0):
        # Inputs for generate_mask: a random RGB PIL image + prompts.
        lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(a_)).to(a_)
        lowerCamelCase_ : Any = image.cpu().permute(0, 2, 3, 1)[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_)).convert("RGB")
        if str(a_).startswith("mps"):
            lowerCamelCase_ : Tuple = torch.manual_seed(a_)
        else:
            lowerCamelCase_ : List[Any] = torch.Generator(device=a_).manual_seed(a_)
        lowerCamelCase_ : int = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase(self, a_, a_=0):
        # Inputs for pipe.invert: random RGB PIL image + single prompt.
        lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(a_)).to(a_)
        lowerCamelCase_ : Any = image.cpu().permute(0, 2, 3, 1)[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_)).convert("RGB")
        if str(a_).startswith("mps"):
            lowerCamelCase_ : Optional[int] = torch.manual_seed(a_)
        else:
            lowerCamelCase_ : Tuple = torch.Generator(device=a_).manual_seed(a_)
        lowerCamelCase_ : Union[str, Any] = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase(self):
        # Optional components set to None must survive a save/load round trip
        # and the reloaded pipeline must produce (near-)identical output.
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        lowerCamelCase_ : List[Any] = self.get_dummy_components()
        lowerCamelCase_ : int = self.pipeline_class(**a_)
        pipe.to(a_)
        pipe.set_progress_bar_config(disable=a_)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a_, a_, a_)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        lowerCamelCase_ : int = self.get_dummy_inputs(a_)
        lowerCamelCase_ : int = pipe(**a_)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a_)
            lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_)
            pipe_loaded.to(a_)
            pipe_loaded.set_progress_bar_config(disable=a_)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(a_, a_) is None,
                F"""`{optional_component}` did not stay set to None after loading.""",
            )
        lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_)
        lowerCamelCase_ : Optional[int] = pipe_loaded(**a_)[0]
        lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded).max()
        self.assertLess(a_, 1E-4)

    def _UpperCamelCase(self):
        # generate_mask on tiny components: check shape and a reference slice.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : List[Any] = self.pipeline_class(**a_)
        pipe.to(a_)
        pipe.set_progress_bar_config(disable=a_)
        lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_)
        lowerCamelCase_ : int = pipe.generate_mask(**a_)
        lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        lowerCamelCase_ : List[str] = np.array([0] * 9)
        lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a_, 1E-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def _UpperCamelCase(self):
        # DDIM inversion on tiny components: check shape and reference slice.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_)
        pipe.to(a_)
        pipe.set_progress_bar_config(disable=a_)
        lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_)
        lowerCamelCase_ : Dict = pipe.invert(**a_).images
        lowerCamelCase_ : str = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        lowerCamelCase_ : Dict = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99],
        )
        lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a_, 1E-3)

    def _UpperCamelCase(self):
        super().test_inference_batch_single_identical(expected_max_diff=5E-3)

    def _UpperCamelCase(self):
        # Same inversion test but with the DPMSolver multistep scheduler pair.
        lowerCamelCase_ : List[Any] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
        lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_)
        lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_)
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_)
        pipe.to(a_)
        pipe.set_progress_bar_config(disable=a_)
        lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_)
        lowerCamelCase_ : str = pipe.invert(**a_).images
        lowerCamelCase_ : int = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        lowerCamelCase_ : Union[str, Any] = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99],
        )
        lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a_, 1E-3)


@require_torch_gpu
@slow
class lowerCAmelCase__(unittest.TestCase):
    """Slow GPU integration tests against stabilityai/stable-diffusion-2-1."""

    def _UpperCamelCase(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def _UpperCamelCase(cls):
        # Shared fixture image (resized to 768x768).
        lowerCamelCase_ : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        lowerCamelCase_ : int = raw_image.convert("RGB").resize((768, 768))
        lowerCamelCase_ : List[Any] = raw_image

    def _UpperCamelCase(self):
        # Full DiffEdit round trip with DDIM schedulers; compare against the
        # stored "pears" reference image.
        lowerCamelCase_ : Dict = torch.manual_seed(0)
        lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=a_, torch_dtype=torch.floataa
        )
        lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config)
        lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_)
        lowerCamelCase_ : str = "a bowl of fruit"
        lowerCamelCase_ : Optional[int] = "a bowl of pears"
        lowerCamelCase_ : List[Any] = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=a_,
            target_prompt=a_,
            generator=a_,
        )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_, image=self.raw_image, inpaint_strength=0.7, generator=a_
        ).latents
        lowerCamelCase_ : List[str] = pipe(
            prompt=a_,
            mask_image=a_,
            image_latents=a_,
            generator=a_,
            negative_prompt=a_,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1

    def _UpperCamelCase(self):
        # Same round trip using the DPMSolver multistep scheduler pair.
        lowerCamelCase_ : Optional[Any] = torch.manual_seed(0)
        lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=a_, torch_dtype=torch.floataa
        )
        lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_)
        lowerCamelCase_ : Any = "a bowl of fruit"
        lowerCamelCase_ : Dict = "a bowl of pears"
        lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=a_,
            target_prompt=a_,
            generator=a_,
        )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=a_,
            num_inference_steps=25,
        ).latents
        lowerCamelCase_ : Any = pipe(
            prompt=a_,
            mask_image=a_,
            image_latents=a_,
            generator=a_,
            negative_prompt=a_,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1
73
1
# NOTE(review): machine-obfuscated Flax ControlNet module (output dataclass,
# conditioning-embedding module, ControlNet model). Class names all collapse
# to `lowerCAmelCase__`; dataclass fields all collapse to `__UpperCAmelCase`
# (later assignments shadow earlier ones); `__call__` has duplicate `a_`
# parameters, a SyntaxError as written. Tokens preserved; structure restored.
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
)


@flax.struct.dataclass
class lowerCAmelCase__(__lowerCamelCase):
    """Output: per-resolution down-block residuals and the mid-block residual."""

    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : jnp.ndarray


class lowerCAmelCase__(nn.Module):
    """Embeds the conditioning image (e.g. edge map) into the UNet's feature
    space via a small conv stack with stride-2 downsampling, ending in a
    zero-initialized projection."""

    __UpperCAmelCase : int
    __UpperCAmelCase : Tuple[int] = (16, 32, 96, 256)
    __UpperCAmelCase : jnp.dtype = jnp.floataa

    def _UpperCamelCase(self):
        lowerCamelCase_ : Union[str, Any] = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        lowerCamelCase_ : Optional[int] = []
        for i in range(len(self.block_out_channels) - 1):
            lowerCamelCase_ : str = self.block_out_channels[i]
            lowerCamelCase_ : List[Any] = self.block_out_channels[i + 1]
            # Same-resolution conv followed by a stride-2 downsampling conv.
            lowerCamelCase_ : Tuple = nn.Conv(
                a_,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(a_)
            lowerCamelCase_ : Optional[Any] = nn.Conv(
                a_,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(a_)
        lowerCamelCase_ : Union[str, Any] = blocks
        # Zero-initialized output conv so the ControlNet starts as a no-op.
        lowerCamelCase_ : int = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, a_):
        # conv_in -> silu -> (conv -> silu)* -> conv_out
        lowerCamelCase_ : Optional[Any] = self.conv_in(a_)
        lowerCamelCase_ : Optional[Any] = nn.silu(a_)
        for block in self.blocks:
            lowerCamelCase_ : str = block(a_)
            lowerCamelCase_ : str = nn.silu(a_)
        lowerCamelCase_ : Any = self.conv_out(a_)
        return embedding


@flax_register_to_config
class lowerCAmelCase__(nn.Module, __lowerCamelCase, __lowerCamelCase):
    """Flax ControlNet: a UNet encoder copy whose per-block residuals (passed
    through zero-initialized 1x1 convs) condition a frozen UNet."""

    __UpperCAmelCase : int = 32
    __UpperCAmelCase : int = 4
    __UpperCAmelCase : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    __UpperCAmelCase : Union[bool, Tuple[bool]] = False
    __UpperCAmelCase : Tuple[int] = (320, 640, 1280, 1280)
    __UpperCAmelCase : int = 2
    __UpperCAmelCase : Union[int, Tuple[int]] = 8
    __UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None
    __UpperCAmelCase : int = 1280
    __UpperCAmelCase : float = 0.0
    __UpperCAmelCase : bool = False
    __UpperCAmelCase : jnp.dtype = jnp.floataa
    __UpperCAmelCase : bool = True
    __UpperCAmelCase : int = 0
    __UpperCAmelCase : str = "rgb"
    __UpperCAmelCase : Tuple[int] = (16, 32, 96, 256)

    def _UpperCamelCase(self, a_):
        # init input tensors
        lowerCamelCase_ : Tuple = (1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase_ : Union[str, Any] = jnp.zeros(a_, dtype=jnp.floataa)
        lowerCamelCase_ : int = jnp.ones((1,), dtype=jnp.intaa)
        lowerCamelCase_ : int = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
        # Conditioning image is 8x the latent resolution, 3 channels.
        lowerCamelCase_ : Union[str, Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase_ : List[str] = jnp.zeros(a_, dtype=jnp.floataa)
        lowerCamelCase_ ,lowerCamelCase_ : List[str] = jax.random.split(a_)
        lowerCamelCase_ : Dict = {"params": params_rng, "dropout": dropout_rng}
        return self.init(a_, a_, a_, a_, a_)["params"]

    def _UpperCamelCase(self):
        lowerCamelCase_ : Tuple = self.block_out_channels
        lowerCamelCase_ : Optional[Any] = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase_ : List[str] = self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase_ : Dict = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        # time
        lowerCamelCase_ : Union[str, Any] = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        lowerCamelCase_ : int = FlaxTimestepEmbedding(a_, dtype=self.dtype)
        lowerCamelCase_ : List[str] = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )
        # Broadcast scalar settings per down block.
        lowerCamelCase_ : Dict = self.only_cross_attention
        if isinstance(a_, a_):
            lowerCamelCase_ : List[Any] = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(a_, a_):
            lowerCamelCase_ : Optional[Any] = (num_attention_heads,) * len(self.down_block_types)
        # down
        lowerCamelCase_ : Tuple = []
        lowerCamelCase_ : List[str] = []
        lowerCamelCase_ : Dict = block_out_channels[0]
        # One zero-initialized 1x1 projection per residual output.
        lowerCamelCase_ : Any = nn.Conv(
            a_,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(a_)
        for i, down_block_type in enumerate(self.down_block_types):
            lowerCamelCase_ : List[str] = output_channel
            lowerCamelCase_ : Any = block_out_channels[i]
            lowerCamelCase_ : Optional[Any] = i == len(a_) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase_ : Dict = FlaxCrossAttnDownBlockaD(
                    in_channels=a_,
                    out_channels=a_,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                lowerCamelCase_ : Dict = FlaxDownBlockaD(
                    in_channels=a_,
                    out_channels=a_,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(a_)
            for _ in range(self.layers_per_block):
                lowerCamelCase_ : Dict = nn.Conv(
                    a_,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(a_)
            if not is_final_block:
                lowerCamelCase_ : Union[str, Any] = nn.Conv(
                    a_,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(a_)
        lowerCamelCase_ : List[str] = down_blocks
        lowerCamelCase_ : Union[str, Any] = controlnet_down_blocks
        # mid
        lowerCamelCase_ : Optional[int] = block_out_channels[-1]
        lowerCamelCase_ : List[str] = FlaxUNetMidBlockaDCrossAttn(
            in_channels=a_,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        lowerCamelCase_ : str = nn.Conv(
            a_,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, a_, a_, a_, a_, a_=1.0, a_=True, a_=False,):
        # NOTE(review): duplicate `a_` parameters — obfuscation artifact;
        # originally (sample, timesteps, encoder_hidden_states,
        # controlnet_cond, conditioning_scale, return_dict, train).
        lowerCamelCase_ : int = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Flip channel axis for BGR-ordered conditioning images.
            lowerCamelCase_ : int = jnp.flip(a_, axis=1)
        # 1. time
        if not isinstance(a_, jnp.ndarray):
            lowerCamelCase_ : List[str] = jnp.array([timesteps], dtype=jnp.intaa)
        elif isinstance(a_, jnp.ndarray) and len(timesteps.shape) == 0:
            lowerCamelCase_ : str = timesteps.astype(dtype=jnp.floataa)
            lowerCamelCase_ : Any = jnp.expand_dims(a_, 0)
        lowerCamelCase_ : Optional[int] = self.time_proj(a_)
        lowerCamelCase_ : Any = self.time_embedding(a_)
        # 2. pre-process (NCHW -> NHWC for flax convs), then inject the
        # embedded conditioning image additively.
        lowerCamelCase_ : Union[str, Any] = jnp.transpose(a_, (0, 2, 3, 1))
        lowerCamelCase_ : Tuple = self.conv_in(a_)
        lowerCamelCase_ : Tuple = jnp.transpose(a_, (0, 2, 3, 1))
        lowerCamelCase_ : int = self.controlnet_cond_embedding(a_)
        sample += controlnet_cond
        # 3. down
        lowerCamelCase_ : str = (sample,)
        for down_block in self.down_blocks:
            if isinstance(a_, a_):
                lowerCamelCase_ ,lowerCamelCase_ : Dict = down_block(a_, a_, a_, deterministic=not train)
            else:
                lowerCamelCase_ ,lowerCamelCase_ : Tuple = down_block(a_, a_, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        lowerCamelCase_ : List[str] = self.mid_block(a_, a_, a_, deterministic=not train)
        # 5. contronet blocks: project every residual through its 1x1 conv.
        lowerCamelCase_ : Union[str, Any] = ()
        for down_block_res_sample, controlnet_block in zip(a_, self.controlnet_down_blocks):
            lowerCamelCase_ : Optional[int] = controlnet_block(a_)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase_ : List[Any] = controlnet_down_block_res_samples
        lowerCamelCase_ : Any = self.controlnet_mid_block(a_)
        # 6. scaling
        lowerCamelCase_ : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=a_, mid_block_res_sample=a_
        )
73
# NOTE(review): this module is a machine-mangled copy of the transformers
# backbone-utils unit tests.  As written it cannot run:
#   * `lowerCamelCase_ ,lowerCamelCase_ : Tuple = ...` annotates a tuple
#     target, which is a SyntaxError in Python.
#   * `a_` and `backbone` are read but never defined (NameError at call time).
#   * all three test methods share the name `_UpperCamelCase`, so only the
#     last definition would be registered on the class.
# Code is left byte-identical; only comments/docstrings were added.
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for backbone out_features / out_indices alignment helpers."""

    def _UpperCamelCase ( self ):
        """Exercises get_aligned_output_features_output_indices defaults and alignment."""
        lowerCamelCase_ : int = ["a", "b", "c"]
        # Defaults to last layer if both are None
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
        self.assertEqual(a_ , ["c"] )
        self.assertEqual(a_ , [2] )
        # Out indices set to match out features
        lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features set to match out indices
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features selected from negative indices
        lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [-3, -1] )

    def _UpperCamelCase ( self ):
        """Checks the validation errors raised by verify_out_features_out_indices."""
        # Stage names must be set
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
        # Out features must be a list
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )

    def _UpperCamelCase ( self ):
        """Checks that BackboneMixin keeps out_features and out_indices in sync."""
        lowerCamelCase_ : List[Any] = BackboneMixin()
        lowerCamelCase_ : List[Any] = ["a", "b", "c"]
        lowerCamelCase_ : Optional[int] = ["a", "c"]
        lowerCamelCase_ : Dict = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        lowerCamelCase_ : Union[str, Any] = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        lowerCamelCase_ : str = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
73
1
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = tf.convert_to_tensor( [ [ 8.2_22_09_91, # 3rd highest value; idx. 0 -0.5_62_00_44, 5.23_22_97_52, 4.0_38_63_93, -6.8_79_83_78, -0.54_78_58_02, -3.2_01_21_53, 2.92_77_71_76, 1.88_17_19_53, 7.35_34_12_76, # 5th highest value; idx. 9 8.43_20_78_33, # 2nd highest value; idx. 10 -9.85_71_18_36, -5.96_20_92_36, -1.13_03_91_61, -7.1_11_52_94, -0.8_36_96_33, -5.3_18_64_08, 7.06_42_74_07, 0.81_36_93_44, -0.82_02_38_17, -5.9_17_97_96, 0.58_81_34_43, -6.99_77_84_38, 4.71_55_11_89, -0.18_77_16_37, 7.44_02_07_59, # 4th highest value; idx. 25 9.38_45_09_87, # 1st highest value; idx. 26 2.12_66_29_41, -9.32_56_20_38, 2.35_65_25_22, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_42_55_18, 4.53_13_92_38, -5.57_51_04_64, -6.28_03_06_99, -7.19_52_95_03, -4.02_12_25_51, 1.39_33_70_37, -6.06_70_70_57, 1.59_48_05_17, -9.64_31_19, 0.03_90_77_99, 0.67_23_17_62, -8.88_20_67_26, 6.27_11_59_22, # 4th highest value; idx. 13 2.28_52_07_23, 4.82_76_75_06, 4.30_42_13_68, 8.8_27_53_13, # 2nd highest value; idx. 17 5.44_02_99_58, # 5th highest value; idx. 
18 -4.4_73_57_94, 7.38_57_95_36, # 3rd highest value; idx. 20 -2.91_05_16_63, 2.61_94_60_77, -2.5_67_47_62, -9.48_95_93_02, -4.02_92_26_45, -1.35_41_69_18, 9.67_70_23_23, # 1st highest value; idx. 27 -5.89_47_85_53, 1.85_37_04_67, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) lowerCamelCase_ : str = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above lowerCamelCase_ : Optional[int] = tf.convert_to_tensor( [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above lowerCamelCase_ : str = tf_top_k_top_p_filtering(a_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) lowerCamelCase_ : Union[str, Any] = output[output != -float("inf" )] lowerCamelCase_ : List[Any] = tf.cast( tf.where(tf.not_equal(a_ , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(a_ , a_ , rtol=1E-12 ) tf.debugging.assert_equal(a_ , a_ ) @require_tf class lowerCAmelCase__ ( unittest.TestCase, __lowerCamelCase ): """simple docstring""" # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): __UpperCAmelCase : Tuple = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def _UpperCamelCase ( self ): # TF-only test: tf.saved_model export lowerCamelCase_ : Tuple = 
TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase_ : Dict = 2 lowerCamelCase_ : int = 2 class lowerCAmelCase__ ( tf.Module ): """simple docstring""" def __init__( self , a_ ): super(a_ , self ).__init__() lowerCamelCase_ : Tuple = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ), tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ), ) , jit_compile=a_ , ) def _UpperCamelCase ( self , a_ , a_ ): lowerCamelCase_ : int = self.model.generate( input_ids=a_ , attention_mask=a_ , max_new_tokens=a_ , return_dict_in_generate=a_ , ) return {"sequences": outputs["sequences"]} lowerCamelCase_ : int = [[2, 0], [102, 103]] lowerCamelCase_ : Optional[Any] = [[1, 0], [1, 1]] lowerCamelCase_ : Dict = DummyModel(model=a_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(a_ , a_ , signatures={"serving_default": dummy_model.serving} ) lowerCamelCase_ : List[str] = tf.saved_model.load(a_ ).signatures["serving_default"] for batch_size in range(1 , len(a_ ) + 1 ): lowerCamelCase_ : Tuple = { "input_ids": tf.constant(dummy_input_ids[:batch_size] ), "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ), } lowerCamelCase_ : Dict = serving_func(**a_ )["sequences"] lowerCamelCase_ : str = test_model.generate(**a_ , max_new_tokens=a_ ) tf.debugging.assert_equal(a_ , a_ ) @slow def _UpperCamelCase ( self ): # TF-only test: tf.saved_model export lowerCamelCase_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase_ : int = 1 lowerCamelCase_ : Tuple = 2 class lowerCAmelCase__ ( tf.Module ): """simple docstring""" def __init__( self , a_ ): super(a_ , self ).__init__() lowerCamelCase_ : Tuple = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ), ) , jit_compile=a_ , ) 
def _UpperCamelCase ( self , a_ , a_ ): lowerCamelCase_ : Dict = self.model.generate( input_ids=a_ , attention_mask=a_ , max_new_tokens=a_ , return_dict_in_generate=a_ , ) return {"sequences": outputs["sequences"]} lowerCamelCase_ : Optional[int] = [[2], [102, 103]] lowerCamelCase_ : Union[str, Any] = [[1], [1, 1]] lowerCamelCase_ : List[Any] = DummyModel(model=a_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(a_ , a_ , signatures={"serving_default": dummy_model.serving} ) lowerCamelCase_ : Optional[Any] = tf.saved_model.load(a_ ).signatures["serving_default"] for input_row in range(len(a_ ) ): lowerCamelCase_ : List[str] = { "input_ids": tf.constant([dummy_input_ids[input_row]] ), "attention_mask": tf.constant([dummy_attention_masks[input_row]] ), } lowerCamelCase_ : Optional[Any] = serving_func(**a_ )["sequences"] lowerCamelCase_ : Optional[Any] = test_model.generate(**a_ , max_new_tokens=a_ ) tf.debugging.assert_equal(a_ , a_ ) @slow @require_tensorflow_text def _UpperCamelCase ( self ): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=a_ ) class lowerCAmelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self ): super().__init__() lowerCamelCase_ : int = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(a_ , "spiece.model" ) , "rb" ).read() ) lowerCamelCase_ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" ) def _UpperCamelCase ( self , a_ , *a_ , **a_ ): lowerCamelCase_ : int = self.tokenizer.tokenize(a_ ) lowerCamelCase_ ,lowerCamelCase_ : List[Any] = text.pad_model_inputs( a_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) lowerCamelCase_ : Tuple = self.model.generate(input_ids=a_ , attention_mask=a_ ) return self.tokenizer.detokenize(a_ ) lowerCamelCase_ : Dict = 
CompleteSentenceTransformer() lowerCamelCase_ : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" ) lowerCamelCase_ : str = complete_model(a_ ) lowerCamelCase_ : Any = tf.keras.Model(a_ , a_ ) keras_model.save(a_ ) def _UpperCamelCase ( self ): # Has PT equivalent: this test relies on random sampling lowerCamelCase_ : Optional[Any] = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } lowerCamelCase_ : Tuple = 14 lowerCamelCase_ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase_ : Dict = "Hello, my dog is cute and" lowerCamelCase_ : Dict = tokenizer(a_ , return_tensors="tf" ) lowerCamelCase_ : Tuple = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase_ : int = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) lowerCamelCase_ : List[Any] = model.generate(**a_ , eos_token_id=a_ , **a_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) lowerCamelCase_ : Any = [638, 198] with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) lowerCamelCase_ : Optional[Any] = model.generate(**a_ , eos_token_id=a_ , **a_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _UpperCamelCase ( self ): # Has PT equivalent: ample use of framework-specific code lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" ) lowerCamelCase_ : Tuple = "Hugging Face is a technology company based in New York and Paris." 
lowerCamelCase_ : Optional[Any] = bart_tokenizer(a_ , return_tensors="tf" ).input_ids lowerCamelCase_ : str = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" ) lowerCamelCase_ : Any = bart_model.generate(a_ ).numpy() class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def _UpperCamelCase ( self , a_ , a_=None , **a_ ): return super().call(a_ , **a_ ) lowerCamelCase_ : Union[str, Any] = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" ) lowerCamelCase_ : int = bart_model.generate(a_ , foo="bar" ).numpy() self.assertTrue(np.array_equal(a_ , a_ ) ) class lowerCAmelCase__ ( bart_model.model.encoder.__class__ ): """simple docstring""" def _UpperCamelCase ( self , a_ , **a_ ): return super().call(a_ , **a_ ) lowerCamelCase_ : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) lowerCamelCase_ : int = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) lowerCamelCase_ : Optional[Any] = bart_model.generate(a_ ).numpy() with self.assertRaises(a_ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(a_ , foo="bar" )
73
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils ) __UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) __UpperCAmelCase : Tuple = ['''accelerate''', '''launch'''] __UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate''' __UpperCAmelCase : int = '''default_config.yaml''' __UpperCAmelCase : Tuple = config_folder / config_file __UpperCAmelCase : int = config_folder / '''_default_config.yaml''' __UpperCAmelCase : int = Path('''tests/test_configs''' ) @classmethod def _UpperCamelCase ( cls ): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def _UpperCamelCase ( cls ): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def _UpperCamelCase ( self ): for config in sorted(self.test_config_path.glob("**/*.yaml" ) ): with self.subTest(config_file=a_ ): execute_subprocess_async( self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() ) def _UpperCamelCase ( self ): execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : List[Any] = '''test-tpu''' __UpperCAmelCase : Tuple = '''us-central1-a''' __UpperCAmelCase : Tuple = '''ls''' __UpperCAmelCase : str = ['''accelerate''', '''tpu-config'''] __UpperCAmelCase : Dict = '''cd /usr/share''' __UpperCAmelCase : Any = 
'''tests/test_samples/test_command_file.sh''' __UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh''' def _UpperCamelCase ( self ): lowerCamelCase_ : Any = run_command( self.cmd + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Union[str, Any] = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--command", "echo \"Hello World\"", "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", 
"--command_file", self.command_file, "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Dict = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command_file", self.command_file, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : str = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--accelerate_version", "12.0.0", "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
73
1
# NOTE(review): this module appears to be a mangled copy of a Flax
# Karras-et-al. (stochastic "KarrasVe") diffusion scheduler.  As written it
# cannot run:
#   * several signatures repeat the parameter name `a_` (SyntaxError);
#   * all three classes share the name `lowerCAmelCase__`, and the bases
#     `__lowerCamelCase` plus the annotations `KarrasVeSchedulerState` /
#     return type `FlaxKarrasVeOutput` are undefined names;
#   * method bodies read undefined locals (`num_inference_steps`, `state`,
#     `sigma`, `sample`, `gamma`, `eps`, `sample_hat`, `model_output`, ...);
#   * `jnp.floataa` is not a real jax dtype (presumably `float32` — confirm).
# Code is left byte-identical; only comments/docstrings were added.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class lowerCAmelCase__ :
    """Mutable scheduler state: step count, sigma schedule, and timesteps."""

    # setable values
    __UpperCAmelCase : Optional[int] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def _UpperCamelCase ( cls ):
        # Factory: returns a state instance with all fields at their defaults.
        return cls()


@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Output container: previous sample, derivative, and scheduler state."""

    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : KarrasVeSchedulerState


class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
    """Flax scheduler implementing the stochastic sampler of Karras et al."""

    @property
    def _UpperCamelCase ( self ):
        # This scheduler carries its state explicitly (functional style).
        return True

    @register_to_config
    def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
        # All hyper-parameters are captured by @register_to_config; no body needed.
        pass

    def _UpperCamelCase ( self ):
        # Create a fresh, empty scheduler state.
        return KarrasVeSchedulerState.create()

    def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
        """Build the descending timestep array and geometric sigma schedule."""
        lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
        # Geometric interpolation between sigma_max and sigma_min over the steps.
        lowerCamelCase_ : List[str] = [
            (
                self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
        """Explode the sample to a higher noise level sigma_hat (stochastic churn)."""
        # gamma controls how much extra noise is injected; zero outside [s_min, s_max].
        if self.config.s_min <= sigma <= self.config.s_max:
            lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            lowerCamelCase_ : Optional[int] = 0
        # sample eps ~ N(0, S_noise^2 * I)
        lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
        lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
        lowerCamelCase_ : List[str] = sigma + gamma * sigma
        lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        """First-order (Euler) step from sigma_hat down to sigma_prev."""
        lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
        lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
        lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        """Second-order (Heun) correction averaging the two derivatives."""
        lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
        lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
        lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        # Forward noising is not supported by this scheduler.
        raise NotImplementedError()
73
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowerCAmelCase__(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: denoise in VQ-VAE latent space,
    then decode to images.

    Fixes over the previous revision: ``__init__``/``__call__`` declared the
    same parameter name several times (a SyntaxError), the base class was the
    undefined name ``__lowerCamelCase`` instead of the imported
    ``DiffusionPipeline``, and the bodies read locals that were never bound.
    """

    def __init__(self, vqvae, unet, scheduler):
        """Register the VQ-VAE decoder, the denoising UNet, and the scheduler."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample ``batch_size`` images.

        Returns an ``ImagePipelineOutput`` (or a plain tuple when
        ``return_dict`` is False) holding the decoded images.
        """
        # Start from Gaussian noise in latent space.
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
73
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class lowerCAmelCase__(PipelineTool):
    """Tool producing a binary segmentation mask of an image for a text label.

    Fixes over the previous revision: every class attribute was bound to the
    same mangled name (so only the last survived), ``__init__`` declared
    ``*args`` and ``**kwargs`` with one shared identifier (a SyntaxError),
    the methods read locals that were never assigned, and the output dtype
    was the non-existent ``np.uinta`` (now ``np.uint8``).
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # The tool needs PIL for decoding; fail fast if the vision extra is missing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        """Preprocess the (image, label) pair into model-ready tensors."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run CLIPSeg without gradients and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at zero and return a black/white PIL mask."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
73
import re

# Explicit public API (also lets `from module import *` expose the
# underscore-prefixed name).
__all__ = ["__magic_name__"]


def __magic_name__(lowerCAmelCase_):
    """Return the complementary DNA strand (A<->T, C<->G).

    Args:
        lowerCAmelCase_: a strand made only of the uppercase bases A, T, C, G
            (the empty strand is allowed).

    Raises:
        ValueError: if the strand contains any other character.

    Fix: the previous body translated the undefined name ``dna`` instead of
    the parameter, so every call raised ``NameError``.
    """
    # Every character must be a valid base: the count of [ATCG] matches
    # must equal the string length.
    if len(re.findall("[ATCG]", lowerCAmelCase_)) != len(lowerCAmelCase_):
        raise ValueError("Invalid Strand")
    return lowerCAmelCase_.translate(str.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer lowerCamelCase_ : Optional[Any] = flax_key_tuple[:-1] + ("weight",) lowerCamelCase_ : Dict = torch.permute(lowerCAmelCase_ , (0, 2, 1)) elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase_): # linear layer lowerCamelCase_ : Union[str, Any] = flax_key_tuple[:-1] + ("weight",) lowerCamelCase_ : Union[str, Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ : Optional[int] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' if "metadata" in layer: lowerCamelCase_ : int = layer.split("metadata") lowerCamelCase_ : Dict = "".join(split_layer[0])[:-1] lowerCamelCase_ : List[str] = [tuple(("metadata" + split_layer[1]).split("/"))] elif "kvstore" in layer: lowerCamelCase_ : List[Any] = layer.split("kvstore") lowerCamelCase_ : List[str] = "".join(split_layer[0])[:-1] lowerCamelCase_ : Tuple = [tuple(("kvstore" + split_layer[1]).split("/"))] else: lowerCamelCase_ : Any = layer.split("/") lowerCamelCase_ : Tuple = "/".join(split_layer[:-1]) lowerCamelCase_ : Any = (split_layer[-1],) if "kvstore/path" in layer: lowerCamelCase_ : Union[str, Any] = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: lowerCamelCase_ : List[Any] 
= "file" else: lowerCamelCase_ : Optional[int] = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Dict = rename_keys(lowerCAmelCase_) lowerCamelCase_ : int = {} for k, v in current_block.items(): lowerCamelCase_ : Optional[Any] = v lowerCamelCase_ : str = new_current_block torch.save(lowerCAmelCase_ , lowerCAmelCase_) def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = WEIGHTS_NAME): '''simple docstring''' lowerCamelCase_ : Dict = convert_file_size_to_int(lowerCAmelCase_) lowerCamelCase_ : List[Any] = [] lowerCamelCase_ : int = {} lowerCamelCase_ : Optional[Any] = 0 lowerCamelCase_ : List[Any] = 0 os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb") as fp: lowerCamelCase_ : str = serialization.msgpack_restore(fp.read())["optimizer"]["target"] lowerCamelCase_ : Dict = flatten_dict(lowerCAmelCase_ , sep="/") lowerCamelCase_ : Optional[int] = {} for layer in checkpoint_info.keys(): lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = get_key_and_tensorstore_dict( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) if curr_real_layer_name in all_layers: lowerCamelCase_ : List[Any] = content else: lowerCamelCase_ : Dict = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file lowerCamelCase_ : Optional[Any] = ts.open(unflatten_dict(all_layers[key])).result().read().result() lowerCamelCase_ : str = torch.tensor(lowerCAmelCase_) lowerCamelCase_ : Union[str, Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype) # use the renaming pattern from the small conversion scripts lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = rename_base_flax_keys(tuple(key.split("/")) , lowerCAmelCase_) lowerCamelCase_ : Any = "/".join(lowerCAmelCase_) # If this weight is going to tip up over the 
maximal size, we split. if current_block_size + weight_size > max_shard_size: lowerCamelCase_ : List[Any] = os.path.join( lowerCAmelCase_ , weights_name.replace(".bin" , F"""-{len(lowerCAmelCase_)+1:05d}-of-???.bin""")) rename_and_save_block(lowerCAmelCase_ , lowerCAmelCase_) sharded_state_dicts.append(current_block.keys()) del current_block lowerCamelCase_ : List[Any] = {} lowerCamelCase_ : Optional[int] = 0 lowerCamelCase_ : Any = raw_weights.to(getattr(lowerCAmelCase_ , lowerCAmelCase_)) current_block_size += weight_size total_size += weight_size # Add the last block lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , weights_name.replace(".bin" , F"""-{len(lowerCAmelCase_)+1:05d}-of-???.bin""")) rename_and_save_block(lowerCAmelCase_ , lowerCAmelCase_) sharded_state_dicts.append(current_block.keys()) # If we only have one shard, we return it if len(lowerCAmelCase_) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index lowerCamelCase_ : Dict = {} lowerCamelCase_ : List[str] = {} for idx, shard in enumerate(lowerCAmelCase_): lowerCamelCase_ : Optional[int] = weights_name.replace( ".bin" , F"""-{idx+1:05d}-of-{len(lowerCAmelCase_):05d}.bin""") # len(sharded_state_dicts):05d} lowerCamelCase_ : Tuple = os.path.join(lowerCAmelCase_ , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""")) os.rename(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_)) lowerCamelCase_ : Optional[int] = shard for key in shard: lowerCamelCase_ : Union[str, Any] = shard_file # Add the metadata lowerCamelCase_ : Dict = {"total_size": total_size} lowerCamelCase_ : Tuple = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_) , "w" , encoding="utf-8") as f: lowerCamelCase_ : Union[str, Any] = json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_) + "\n" f.write(lowerCAmelCase_) return metadata, index if __name__ == "__main__": __magic_name__ = 
argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''') parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''', type=str, required=False, help='''Path to the output pytorch model.''', ) __magic_name__ = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __magic_name__ ( ): '''simple docstring''' from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer lowerCamelCase_ : Optional[Any] = SwitchTransformersConfig.from_pretrained("google/switch-base-8") config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted") lowerCamelCase_ : str = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto") lowerCamelCase_ : Optional[int] = TaTokenizer.from_pretrained("t5-small") lowerCamelCase_ : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." lowerCamelCase_ : Optional[int] = tokenizer(lowerCAmelCase_ , return_tensors="pt").input_ids lowerCamelCase_ : int = model.generate(lowerCAmelCase_ , decoder_start_token_id=0) print(tokenizer.decode(out[0]))
73
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False): '''simple docstring''' if radian_mode: return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)] return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))] def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1): '''simple docstring''' lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_) lowerCamelCase_ : float = sum(lowerCAmelCase_) return abs(lowerCAmelCase_) < eps if __name__ == "__main__": # Test to check if it works __magic_name__ = array( [ polar_force(7_18.4, 1_8_0 - 3_0), polar_force(8_79.54, 4_5), polar_force(1_0_0, -9_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __magic_name__ = array( [ polar_force(3_0 * 9.81, 1_5), polar_force(2_1_5, 1_8_0 - 4_5), polar_force(2_6_4, 9_0 - 3_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]]) __magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
73
1
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of *number*.

    Args:
        number: Value whose fractional part is wanted.
        digit_amount: When > 0, the fractional part is rounded to this many
            decimal places; otherwise it is returned unrounded.

    Returns:
        The fractional part of ``number`` (sign-preserving for negatives).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
73
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.

    Text inputs are forwarded to the tokenizer, audio inputs to the feature
    extractor; when both are given the audio features are merged into the
    tokenizer's encoding under ``input_features``.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Prepare text and/or audio inputs for the model.

        Raises:
            ValueError: If neither ``text`` nor ``audios`` is provided.
        """
        # sampling_rate is consumed here so it is not forwarded to the tokenizer.
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded verbatim to the underlying tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded verbatim to the underlying tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and feature-extractor input names, order-preserving
        # and de-duplicated via dict.fromkeys.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
73
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = ViTImageProcessor if is_vision_available() else None @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = (3, 32, 128) lowerCamelCase_ : Optional[Any] = tempfile.mkdtemp() # fmt: off lowerCamelCase_ : Optional[int] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowerCamelCase_ : List[str] = dict(zip(a_ , range(len(a_ ) ) ) ) lowerCamelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(a_ ) + "\n" ) lowerCamelCase_ : List[str] = { "do_normalize": False, "do_resize": True, "image_processor_type": "ViTImageProcessor", "resample": 3, "size": {"height": 32, "width": 128}, } lowerCamelCase_ : Optional[int] = os.path.join(self.tmpdirname , a_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(a_ , a_ ) def _UpperCamelCase ( self , **a_ ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a_ ) def _UpperCamelCase ( self , **a_ ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **a_ ) def 
_UpperCamelCase ( self ): shutil.rmtree(self.tmpdirname ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) lowerCamelCase_ : Union[str, Any] = Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) return image_input def _UpperCamelCase ( self ): lowerCamelCase_ : str = self.get_tokenizer() lowerCamelCase_ : str = self.get_image_processor() lowerCamelCase_ : str = MgpstrProcessor(tokenizer=a_ , image_processor=a_ ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=a_ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , a_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.get_tokenizer() lowerCamelCase_ : Dict = self.get_image_processor() lowerCamelCase_ : Tuple = MgpstrProcessor(tokenizer=a_ , image_processor=a_ ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCamelCase_ : Optional[Any] = self.get_image_processor(do_normalize=a_ , padding_value=1.0 ) lowerCamelCase_ : str = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a_ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , a_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : int = self.get_image_processor() lowerCamelCase_ : List[str] = self.get_tokenizer() lowerCamelCase_ : int = MgpstrProcessor(tokenizer=a_ , 
image_processor=a_ ) lowerCamelCase_ : Any = self.prepare_image_inputs() lowerCamelCase_ : List[str] = image_processor(a_ , return_tensors="np" ) lowerCamelCase_ : Tuple = processor(images=a_ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.get_image_processor() lowerCamelCase_ : Union[str, Any] = self.get_tokenizer() lowerCamelCase_ : Dict = MgpstrProcessor(tokenizer=a_ , image_processor=a_ ) lowerCamelCase_ : Optional[Any] = "test" lowerCamelCase_ : Any = processor(text=a_ ) lowerCamelCase_ : List[str] = tokenizer(a_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _UpperCamelCase ( self ): lowerCamelCase_ : Dict = self.get_image_processor() lowerCamelCase_ : Tuple = self.get_tokenizer() lowerCamelCase_ : Dict = MgpstrProcessor(tokenizer=a_ , image_processor=a_ ) lowerCamelCase_ : str = "test" lowerCamelCase_ : Tuple = self.prepare_image_inputs() lowerCamelCase_ : Tuple = processor(text=a_ , images=a_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] ) # test if it raises when no input is passed with pytest.raises(a_ ): processor() def _UpperCamelCase ( self ): lowerCamelCase_ : Union[str, Any] = self.get_image_processor() lowerCamelCase_ : Dict = self.get_tokenizer() lowerCamelCase_ : Any = MgpstrProcessor(tokenizer=a_ , image_processor=a_ ) lowerCamelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase_ : Dict = processor.char_decode(a_ ) lowerCamelCase_ : Optional[Any] = tokenizer.batch_decode(a_ ) lowerCamelCase_ : List[Any] = [seq.replace(" " , "" ) for seq in decoded_tok] self.assertListEqual(a_ , a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = self.get_image_processor() lowerCamelCase_ : Optional[Any] = self.get_tokenizer() lowerCamelCase_ : 
List[str] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ ) lowerCamelCase_ : Union[str, Any] = None lowerCamelCase_ : Any = self.prepare_image_inputs() lowerCamelCase_ : Dict = processor(text=a_ , images=a_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = self.get_image_processor() lowerCamelCase_ : List[str] = self.get_tokenizer() lowerCamelCase_ : Union[str, Any] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ ) lowerCamelCase_ : Union[str, Any] = torch.randn(1 , 27 , 38 ) lowerCamelCase_ : Optional[Any] = torch.randn(1 , 27 , 5_0257 ) lowerCamelCase_ : str = torch.randn(1 , 27 , 3_0522 ) lowerCamelCase_ : Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
73
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if *input_str* contains every letter a-z (set-based check)."""
    frequency = set()
    # Whitespace is irrelevant to the check; strip it up front.
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if *input_str* is a pangram, using a 26-slot flag array."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True  # 97 == ord("a")
        elif char.isupper():
            flag[ord(char) - 65] = True  # 65 == ord("A")
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if *input_str* is a pangram, via a set comprehension."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
73
1
import argparse import json import subprocess def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Dict = [] lowerCamelCase_ : Optional[int] = ( F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"""" " https://api.github.com/repos/huggingface/transformers/actions/runners" ) lowerCamelCase_ : Tuple = subprocess.run(lowerCAmelCase_ , shell=lowerCAmelCase_ , stdout=subprocess.PIPE) lowerCamelCase_ : List[str] = output.stdout.decode("utf-8") lowerCamelCase_ : int = json.loads(lowerCAmelCase_) lowerCamelCase_ : str = status["runners"] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(lowerCAmelCase_) # save the result so we can report them on Slack with open("offline_runners.txt" , "w") as fp: fp.write(json.dumps(lowerCAmelCase_)) if len(lowerCAmelCase_) > 0: lowerCamelCase_ : List[Any] = "\n".join([x["name"] for x in offline_runners]) raise ValueError(F"""The following runners are offline:\n{failed}""") if __name__ == "__main__": def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' return values.split(",") __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--target_runners''', default=None, type=list_str, required=True, help='''Comma-separated list of runners to check status.''', ) parser.add_argument( '''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.''' ) __magic_name__ = parser.parse_args() get_runner_status(args.target_runners, args.token)
73
# Conversion factors to joules for each supported energy unit.
# The function below indexes this table by name, so the module-level
# identifier must be ENERGY_CONVERSION.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert *value* between two energy units.

    Args:
        from_type: Unit of the input value; a key of ``ENERGY_CONVERSION``.
        to_type: Desired output unit; a key of ``ENERGY_CONVERSION``.
        value: Quantity to convert.

    Returns:
        The converted quantity.

    Raises:
        ValueError: If either unit name is not a known key.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert via joules: value -> joules -> target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


# GLUE task name -> number of classification labels. The converter reads this
# table by name, so the module-level identifier must be GLUE_TASKS_NUM_LABELS.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model.

    Builds the model head appropriate to ``finetuning_task`` (sequence
    classification for GLUE tasks, question answering for SQuAD, otherwise a
    plain LM head), loads the TF weights, and writes the PyTorch weights and
    config into ``pytorch_dump_folder_path``.
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
73
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'''vocab_file''': '''spiece.model'''} __magic_name__ = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', } } __magic_name__ = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } # Segments (not really needed) __magic_name__ = 0 __magic_name__ = 1 __magic_name__ = 2 __magic_name__ = 3 __magic_name__ = 4 class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Optional[int] = '''left''' def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ): # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , ) lowerCamelCase_ : str = 3 lowerCamelCase_ : Dict = do_lower_case lowerCamelCase_ : str = remove_space lowerCamelCase_ : Tuple = keep_accents lowerCamelCase_ : Dict = vocab_file lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a_ ) @property def _UpperCamelCase ( self ): return len(self.sp_model ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): lowerCamelCase_ : Any = self.__dict__.copy() lowerCamelCase_ : Optional[int] = None return state def __setstate__( self , a_ ): lowerCamelCase_ : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase_ : int = {} lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCamelCase ( self , a_ ): if self.remove_space: lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() ) else: lowerCamelCase_ : str = inputs lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ ) lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] ) if self.do_lower_case: lowerCamelCase_ : Any = outputs.lower() return outputs def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : List[Any] = self.preprocess_text(a_ ) 
lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ ) lowerCamelCase_ : List[str] = [] for piece in pieces: if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase_ : int = cur_pieces[1:] else: lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(a_ ) else: new_pieces.append(a_ ) return new_pieces def _UpperCamelCase ( self , a_ ): return self.sp_model.PieceToId(a_ ) def _UpperCamelCase ( self , a_ ): return self.sp_model.IdToPiece(a_ ) def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip() return out_string def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ): lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ ) lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowerCamelCase_ : Optional[int] = [] lowerCamelCase_ : List[str] = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(a_ ) ) lowerCamelCase_ : Union[str, Any] = [] sub_texts.append(a_ ) else: current_sub_text.append(a_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(a_ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowerCamelCase_ : Union[str, Any] = "".join(a_ ) lowerCamelCase_ : Optional[Any] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ ) return clean_text else: return text def _UpperCamelCase ( self , a_ , a_ = None ): lowerCamelCase_ : Optional[Any] = [self.sep_token_id] lowerCamelCase_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) if token_ids_a is not None: return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1] return ([0] * len(a_ )) + [1, 1] def _UpperCamelCase ( self , a_ , a_ = None ): lowerCamelCase_ : Optional[Any] = [self.sep_token_id] lowerCamelCase_ : Union[str, Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCamelCase ( self , a_ , a_ = None ): if not os.path.isdir(a_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ 
: Any = os.path.join( a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_ , "wb" ) as fi: lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto() fi.write(a_ ) return (out_vocab_file,)
73
1
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : List[str] = VideoMAEConfig() set_architecture_configs(lowerCAmelCase_ , lowerCAmelCase_) if "finetuned" not in model_name: lowerCamelCase_ : Tuple = False if "finetuned" in model_name: lowerCamelCase_ : Optional[int] = "huggingface/label-files" if "kinetics" in model_name: lowerCamelCase_ : Union[str, Any] = 400 lowerCamelCase_ : Dict = "kinetics400-id2label.json" elif "ssv2" in model_name: lowerCamelCase_ : Optional[int] = 174 lowerCamelCase_ : Optional[int] = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.") lowerCamelCase_ : Dict = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset") , "r")) lowerCamelCase_ : str = {int(lowerCAmelCase_): v for k, v in idalabel.items()} lowerCamelCase_ : Tuple = idalabel lowerCamelCase_ : Dict = {v: k for k, v in idalabel.items()} return config def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' if "small" in model_name: lowerCamelCase_ : Dict = 384 lowerCamelCase_ : Optional[int] = 1536 lowerCamelCase_ : int = 12 lowerCamelCase_ : Tuple = 16 lowerCamelCase_ : List[str] = 12 lowerCamelCase_ : Dict = 3 lowerCamelCase_ : Dict = 192 lowerCamelCase_ : Any = 768 elif "large" in model_name: lowerCamelCase_ : Any = 1024 lowerCamelCase_ : Tuple = 4096 lowerCamelCase_ : Union[str, Any] = 24 lowerCamelCase_ : List[str] = 16 lowerCamelCase_ : Union[str, Any] = 12 lowerCamelCase_ : Any = 8 lowerCamelCase_ : Tuple = 512 lowerCamelCase_ : str = 2048 elif "huge" in model_name: lowerCamelCase_ : str = 1280 lowerCamelCase_ : int = 5120 lowerCamelCase_ : List[Any] = 32 
lowerCamelCase_ : Dict = 16 lowerCamelCase_ : Tuple = 12 lowerCamelCase_ : Union[str, Any] = 8 lowerCamelCase_ : Dict = 640 lowerCamelCase_ : Tuple = 2560 elif "base" not in model_name: raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"") def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' if "encoder." in name: lowerCamelCase_ : List[str] = name.replace("encoder." , "") if "cls_token" in name: lowerCamelCase_ : List[str] = name.replace("cls_token" , "videomae.embeddings.cls_token") if "decoder_pos_embed" in name: lowerCamelCase_ : Optional[Any] = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed") if "pos_embed" in name and "decoder" not in name: lowerCamelCase_ : List[Any] = name.replace("pos_embed" , "videomae.embeddings.position_embeddings") if "patch_embed.proj" in name: lowerCamelCase_ : List[Any] = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: lowerCamelCase_ : Optional[int] = name.replace("patch_embed.norm" , "videomae.embeddings.norm") if "decoder.blocks" in name: lowerCamelCase_ : Optional[int] = name.replace("decoder.blocks" , "decoder.decoder_layers") if "blocks" in name: lowerCamelCase_ : Dict = name.replace("blocks" , "videomae.encoder.layer") if "attn.proj" in name: lowerCamelCase_ : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense") if "attn" in name and "bias" not in name: lowerCamelCase_ : Any = name.replace("attn" , "attention.self") if "attn" in name: lowerCamelCase_ : Any = name.replace("attn" , "attention.attention") if "norm1" in name: lowerCamelCase_ : Tuple = name.replace("norm1" , "layernorm_before") if "norm2" in name: lowerCamelCase_ : str = name.replace("norm2" , "layernorm_after") if "mlp.fc1" in name: lowerCamelCase_ : Tuple = name.replace("mlp.fc1" , "intermediate.dense") if "mlp.fc2" in name: lowerCamelCase_ : Optional[int] = name.replace("mlp.fc2" , "output.dense") if 
"decoder_embed" in name: lowerCamelCase_ : Any = name.replace("decoder_embed" , "decoder.decoder_embed") if "decoder_norm" in name: lowerCamelCase_ : Tuple = name.replace("decoder_norm" , "decoder.decoder_norm") if "decoder_pred" in name: lowerCamelCase_ : List[str] = name.replace("decoder_pred" , "decoder.decoder_pred") if "norm.weight" in name and "decoder" not in name and "fc" not in name: lowerCamelCase_ : List[Any] = name.replace("norm.weight" , "videomae.layernorm.weight") if "norm.bias" in name and "decoder" not in name and "fc" not in name: lowerCamelCase_ : Optional[Any] = name.replace("norm.bias" , "videomae.layernorm.bias") if "head" in name and "decoder" not in name: lowerCamelCase_ : List[Any] = name.replace("head" , "classifier") return name def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCamelCase_ : int = orig_state_dict.pop(lowerCAmelCase_) if key.startswith("encoder."): lowerCamelCase_ : Any = key.replace("encoder." , "") if "qkv" in key: lowerCamelCase_ : List[str] = key.split(".") if key.startswith("decoder.blocks"): lowerCamelCase_ : Tuple = config.decoder_hidden_size lowerCamelCase_ : int = int(key_split[2]) lowerCamelCase_ : List[str] = "decoder.decoder_layers." if "weight" in key: lowerCamelCase_ : str = val[:dim, :] lowerCamelCase_ : List[Any] = val[dim : dim * 2, :] lowerCamelCase_ : Union[str, Any] = val[-dim:, :] else: lowerCamelCase_ : Union[str, Any] = config.hidden_size lowerCamelCase_ : Any = int(key_split[1]) lowerCamelCase_ : List[Any] = "videomae.encoder.layer." 
if "weight" in key: lowerCamelCase_ : Optional[int] = val[:dim, :] lowerCamelCase_ : Dict = val[dim : dim * 2, :] lowerCamelCase_ : List[Any] = val[-dim:, :] else: lowerCamelCase_ : Dict = val return orig_state_dict def __magic_name__ ( ): '''simple docstring''' lowerCamelCase_ : Optional[int] = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset") lowerCamelCase_ : Union[str, Any] = np.load(lowerCAmelCase_) return list(lowerCAmelCase_) def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Any = get_videomae_config(lowerCAmelCase_) if "finetuned" in model_name: lowerCamelCase_ : Optional[Any] = VideoMAEForVideoClassification(lowerCAmelCase_) else: lowerCamelCase_ : Union[str, Any] = VideoMAEForPreTraining(lowerCAmelCase_) # download original checkpoint, hosted on Google Drive lowerCamelCase_ : Tuple = "pytorch_model.bin" gdown.cached_download(lowerCAmelCase_ , lowerCAmelCase_ , quiet=lowerCAmelCase_) lowerCamelCase_ : Optional[Any] = torch.load(lowerCAmelCase_ , map_location="cpu") if "model" in files: lowerCamelCase_ : Dict = files["model"] else: lowerCamelCase_ : Optional[Any] = files["module"] lowerCamelCase_ : Optional[int] = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_) model.load_state_dict(lowerCAmelCase_) model.eval() # verify model on basic input lowerCamelCase_ : List[Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5]) lowerCamelCase_ : Any = prepare_video() lowerCamelCase_ : Optional[Any] = image_processor(lowerCAmelCase_ , return_tensors="pt") if "finetuned" not in model_name: lowerCamelCase_ : Union[str, Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt") lowerCamelCase_ : Union[str, Any] = torch.load(lowerCAmelCase_) lowerCamelCase_ : str = model(**lowerCAmelCase_) lowerCamelCase_ : List[str] = outputs.logits 
lowerCamelCase_ : List[Any] = [ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": lowerCamelCase_ : str = torch.Size([1, 400]) lowerCamelCase_ : List[Any] = torch.tensor([-0.92_91, -0.40_61, -0.93_07]) elif model_name == "videomae-small-finetuned-ssv2": lowerCamelCase_ : str = torch.Size([1, 174]) lowerCamelCase_ : Optional[int] = torch.tensor([0.26_71, -0.46_89, -0.82_35]) elif model_name == "videomae-base": lowerCamelCase_ : List[Any] = torch.Size([1, 1408, 1536]) lowerCamelCase_ : Optional[int] = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]]) elif model_name == "videomae-base-short": lowerCamelCase_ : Union[str, Any] = torch.Size([1, 1408, 1536]) lowerCamelCase_ : Any = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]]) # we verified the loss both for normalized and unnormalized targets for this one lowerCamelCase_ : Any = torch.tensor([0.51_42]) if config.norm_pix_loss else torch.tensor([0.64_69]) elif model_name == "videomae-large": lowerCamelCase_ : Dict = torch.Size([1, 1408, 1536]) lowerCamelCase_ : str = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]]) elif model_name == "videomae-large-finetuned-kinetics": lowerCamelCase_ : int 
= torch.Size([1, 400]) lowerCamelCase_ : Optional[Any] = torch.tensor([0.07_71, 0.00_11, -0.36_25]) elif model_name == "videomae-huge-finetuned-kinetics": lowerCamelCase_ : Union[str, Any] = torch.Size([1, 400]) lowerCamelCase_ : Optional[Any] = torch.tensor([0.24_33, 0.16_32, -0.48_94]) elif model_name == "videomae-base-short-finetuned-kinetics": lowerCamelCase_ : Tuple = torch.Size([1, 400]) lowerCamelCase_ : Tuple = torch.tensor([0.65_88, 0.09_90, -0.24_93]) elif model_name == "videomae-base-finetuned-kinetics": lowerCamelCase_ : Optional[Any] = torch.Size([1, 400]) lowerCamelCase_ : Optional[Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21]) elif model_name == "videomae-base-short-ssv2": lowerCamelCase_ : List[str] = torch.Size([1, 1408, 1536]) lowerCamelCase_ : Optional[Any] = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]]) elif model_name == "videomae-base-short-finetuned-ssv2": lowerCamelCase_ : Dict = torch.Size([1, 174]) lowerCamelCase_ : Tuple = torch.tensor([-0.05_37, -0.15_39, -0.32_66]) elif model_name == "videomae-base-ssv2": lowerCamelCase_ : Union[str, Any] = torch.Size([1, 1408, 1536]) lowerCamelCase_ : Any = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]]) elif model_name == "videomae-base-finetuned-ssv2": lowerCamelCase_ : Optional[int] = torch.Size([1, 174]) lowerCamelCase_ : List[Any] = torch.tensor([0.19_61, -0.83_37, -0.63_89]) else: raise ValueError(F"""Model name not supported. 
Should be one of {model_names}""") # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1E-4) else: print("Logits:" , logits[0, :3, :3]) assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1E-4) print("Logits ok!") # verify loss, if applicable if model_name == "videomae-base-short": lowerCamelCase_ : Dict = outputs.loss assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-4) print("Loss ok!") if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""") image_processor.save_pretrained(lowerCAmelCase_) model.save_pretrained(lowerCAmelCase_) if push_to_hub: print("Pushing to the hub...") model.push_to_hub(lowerCAmelCase_ , organization="nielsr") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''', type=str, help=( '''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct''' ''' download link.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''/Users/nielsrogge/Documents/VideoMAE/Test''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __magic_name__ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
73
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return one of the two bounds: ``min_val`` when ``option`` is truthy, else ``max_val``.

    Raises:
        ValueError: if ``min_val`` is greater than ``max_val``.
    """
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of the two numbers (float division truncated by int())."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for ``to_guess`` strictly inside ``(lower, higher)`` and print each probe.

    Raises:
        ValueError: if ``lower > higher`` or ``to_guess`` is not strictly between them.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        # Compare the current probe against the target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        # Narrow the half of the window that cannot contain the target.
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Read the bounds and the target from stdin, then run the guessing search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
73
1
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with ``num_rows`` rows, centred with spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces so the triangle is centred.
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values, space-separated except after the last element.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row.

    Returns:
        A list of rows; row ``r`` has ``r + 1`` elements. Empty list for 0 rows.

    Raises:
        TypeError: if ``num_rows`` is not an int.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row ``current_row_idx`` from the previously computed rows of ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Fill one interior cell in place as the sum of the two cells above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle computing only the first half of each row.

    Exploits row symmetry: the second half is the mirrored first half.

    Raises:
        TypeError: if ``num_rows`` is not an int.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so each new element is a simple pair sum.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (dropping the middle element for odd-length rows).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both triangle generators for input sizes 0..14."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config URL.
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class lowerCAmelCase__(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Every list-valued argument holds one value per stage of the three-stage
    model. The list defaults mirror the released upstream signature; they are
    never mutated, only stored, so the shared-mutable-default pitfall does not
    apply here.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
73
1
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): this module was machine-obfuscated. ``__magic_name__`` is bound
# twice (the logger binding below is immediately clobbered by the URL map).
__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration class for a Data2Vec audio model.

    NOTE(review): obfuscation damage, to be repaired against the upstream
    ``Data2VecAudioConfig`` before use:
    - every ``__init__`` parameter is named ``a_`` (duplicate argument names,
      a SyntaxError), while the body still reads the original names
      (``hidden_size``, ``conv_dim``, ...);
    - every ``self.<attr> = value`` target was replaced by the throwaway local
      ``lowerCamelCase_`` — the intended attribute is the right-hand-side name.
    """

    # NOTE(review): presumably the original ``model_type`` class attribute.
    __UpperCAmelCase : Union[str, Any] = '''data2vec-audio'''

    def __init__(
        self ,
        a_=32 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 ,
        a_=0.1 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="gelu" ,
        a_=(512, 512, 512, 512, 512, 512, 512) , a_=(5, 2, 2, 2, 2, 2, 2) ,
        a_=(10, 3, 3, 3, 3, 2, 2) , a_=False , a_=16 , a_=19 , a_=5 , a_=0.05 ,
        a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_="sum" , a_=False , a_=False ,
        a_=256 , a_=(512, 512, 512, 512, 1500) , a_=(5, 3, 3, 1, 1) ,
        a_=(1, 2, 3, 1, 1) , a_=512 , a_=0 , a_=1 , a_=2 , a_=False , a_=3 ,
        a_=2 , a_=3 , a_=None , **a_ ,
    ):
        super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
        # Transformer encoder dimensions and dropouts.
        lowerCamelCase_ : Any = hidden_size
        lowerCamelCase_ : Optional[int] = feat_extract_activation
        # Convolutional feature-encoder geometry (copied to lists).
        lowerCamelCase_ : str = list(a_ )
        lowerCamelCase_ : Union[str, Any] = list(a_ )
        lowerCamelCase_ : Optional[int] = list(a_ )
        lowerCamelCase_ : List[str] = conv_bias
        lowerCamelCase_ : List[Any] = num_conv_pos_embeddings
        lowerCamelCase_ : Optional[int] = num_conv_pos_embedding_groups
        lowerCamelCase_ : Optional[int] = conv_pos_kernel_size
        lowerCamelCase_ : Union[str, Any] = len(self.conv_dim )
        lowerCamelCase_ : Union[str, Any] = num_hidden_layers
        lowerCamelCase_ : Tuple = intermediate_size
        lowerCamelCase_ : Optional[int] = hidden_act
        lowerCamelCase_ : Any = num_attention_heads
        lowerCamelCase_ : List[Any] = hidden_dropout
        lowerCamelCase_ : List[Any] = attention_dropout
        lowerCamelCase_ : List[str] = activation_dropout
        lowerCamelCase_ : Optional[int] = feat_proj_dropout
        lowerCamelCase_ : Optional[int] = final_dropout
        lowerCamelCase_ : List[str] = layerdrop
        lowerCamelCase_ : str = layer_norm_eps
        lowerCamelCase_ : str = initializer_range
        lowerCamelCase_ : str = vocab_size
        lowerCamelCase_ : Tuple = use_weighted_layer_sum

        # The three per-layer conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowerCamelCase_ : Any = mask_time_prob
        lowerCamelCase_ : Tuple = mask_time_length
        lowerCamelCase_ : Union[str, Any] = mask_time_min_masks
        lowerCamelCase_ : str = mask_feature_prob
        lowerCamelCase_ : Optional[int] = mask_feature_length
        lowerCamelCase_ : Any = mask_feature_min_masks

        # ctc loss
        lowerCamelCase_ : Optional[Any] = ctc_loss_reduction
        lowerCamelCase_ : List[str] = ctc_zero_infinity

        # adapter
        lowerCamelCase_ : List[Any] = add_adapter
        lowerCamelCase_ : Dict = adapter_kernel_size
        lowerCamelCase_ : List[Any] = adapter_stride
        lowerCamelCase_ : List[Any] = num_adapter_layers
        lowerCamelCase_ : Any = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        lowerCamelCase_ : List[str] = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        lowerCamelCase_ : str = list(a_ )
        lowerCamelCase_ : Optional[int] = list(a_ )
        lowerCamelCase_ : Union[str, Any] = list(a_ )
        lowerCamelCase_ : Any = xvector_output_dim

    @property
    def _UpperCamelCase ( self ):
        # Overall downsampling factor of the conv feature encoder
        # (product of the per-layer strides).
        return math.prod(self.conv_stride )
73
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazily-imported public API of the LayoutXLM subpackage. The processor is
# always available; each tokenizer is registered only when its optional
# backend (sentencepiece / tokenizers) is installed.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below defers them until first attribute access.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first access.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
73
1
from __future__ import annotations


def comp_and_swap(array: list[int], index_1: int, index_2: int, direction: int) -> None:
    """Swap ``array[index_1]`` and ``array[index_2]`` if they violate ``direction``.

    ``direction == 1`` orders the pair ascending, ``direction == 0`` descending.
    """
    if (direction == 1 and array[index_1] > array[index_2]) or (
        direction == 0 and array[index_1] < array[index_2]
    ):
        array[index_1], array[index_2] = array[index_2], array[index_1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge, in place, a bitonic subsequence of ``length`` elements starting at ``low``.

    ``length`` must be a power of two for the compare network to cover all pairs.
    """
    if length > 1:
        middle = int(length / 2)
        # Compare elements half a block apart, then merge each half recursively.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic-sort ``length`` elements (power of two) of ``array`` in place.

    Sorts the two halves in opposite directions to form a bitonic sequence,
    then merges it in ``direction`` (1 = ascending, 0 = descending).
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('''\nSorted array in ascending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('''Sorted array in descending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')
73
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor pairing an ``EncodecFeatureExtractor`` (audio) with a T5 tokenizer (text).

    NOTE(review): this class was machine-obfuscated. Parameters are named
    ``a_`` (duplicated — a SyntaxError) while the bodies read the original
    names (``audio``, ``text``, ``kwargs``, ...), and every assignment target
    was replaced by the throwaway local ``lowerCamelCase_``; the intended
    target is the name read further down. Restore against the upstream
    processor before use.
    """

    # NOTE(review): presumably the original ``feature_extractor_class`` and
    # ``tokenizer_class`` ProcessorMixin class attributes.
    __UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
    __UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')

    def __init__( self , a_ , a_ ):
        # Delegates storage of the two sub-processors to ProcessorMixin, then
        # selects the feature extractor as the default "current" processor.
        super().__init__(a_ , a_ )
        lowerCamelCase_ : Optional[Any] = self.feature_extractor
        lowerCamelCase_ : Optional[int] = False

    def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
        # Forwards decoder-prompt construction to the tokenizer.
        return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )

    def __call__( self , *a_ , **a_ ):
        """Dispatch to the tokenizer and/or feature extractor based on inputs.

        Accepts ``audio``, ``sampling_rate`` and ``text`` keyword arguments
        (or positionally); returns text features, audio features, or text
        features augmented with ``input_values``/``padding_mask``.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a_ , **a_ )

        lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
        if len(a_ ) > 0:
            # Positional fallback: first positional argument is the audio.
            lowerCamelCase_ : int = args[0]
            lowerCamelCase_ : str = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )

        if text is not None:
            lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
        if audio is not None:
            lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio features into the text encoding.
            lowerCamelCase_ : Dict = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowerCamelCase_ : int = audio_inputs["padding_mask"]
            return inputs

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Batch decode: audio values go through _decode_audio, token ids
        # through the tokenizer.
        lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : Optional[int] = args[0]
            lowerCamelCase_ : Optional[Any] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a_ , padding_mask=a_ )
        else:
            return self.tokenizer.batch_decode(*a_ , **a_ )

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Single-sequence decode, forwarded to the tokenizer.
        return self.tokenizer.decode(*a_ , **a_ )

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Strip padding from generated audio using ``padding_mask``.

        Returns a list of per-example numpy arrays; without a mask, the raw
        (batch, channels, seq_len) array is simply split into a list.
        """
        lowerCamelCase_ : Any = to_numpy(a_ )
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape

        if padding_mask is None:
            return list(a_ )

        lowerCamelCase_ : Tuple = to_numpy(a_ )

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
        lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
        lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )

        lowerCamelCase_ : str = audio_values.tolist()
        for i in range(a_ ):
            # Keep only the positions the mask marks as real audio.
            lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
        return audio_values
73
1
def multiplicative_persistence(num: int) -> int:
    """Return the multiplicative persistence of ``num``.

    The persistence is the number of times the digits must be multiplied
    together until a single digit remains (e.g. 217 -> 14 -> 4 gives 2).

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        total = 1
        for digit in num_string:
            total *= int(digit)
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the additive persistence of ``num``.

    The persistence is the number of times the digits must be summed until a
    single digit remains (e.g. 199 -> 19 -> 10 -> 1 gives 3).

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        total = sum(int(digit) for digit in num_string)
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the fractional part of ``number``.

    When ``digit_amount`` is positive the fraction is rounded to that many
    decimal places; otherwise the raw fractional part is returned (keeping the
    sign of ``number``, since ``int()`` truncates toward zero).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
73
1
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


# NOTE(review): this module was machine-obfuscated. ``__magic_name__`` is
# rebound repeatedly below (logger, vocab-file names, vocab map, size map,
# segment ids), so only the last binding survives at runtime.
__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {'''vocab_file''': '''spiece.model'''}

__magic_name__ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}

__magic_name__ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}

# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4


class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece-based tokenizer (XLNet-style: special tokens appended at the end).

    NOTE(review): obfuscation damage throughout — ``__init__``/method
    parameters are all named ``a_`` (duplicate argument names, a SyntaxError)
    while the bodies read the original names, and every assignment target was
    replaced by the local ``lowerCamelCase_``; the intended target is the name
    read further down. Restore against the upstream XLNet tokenizer before use.
    """

    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Padding side used by this tokenizer family.
    __UpperCAmelCase : Optional[int] = '''left'''

    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token

        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )

        # Number of ids prepended before the vocabulary proper (pad/eos/unk offset).
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file

        # Load the SentencePiece model from disk.
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )

    @property
    def _UpperCamelCase ( self ):
        # Vocabulary size == size of the SentencePiece model.
        return len(self.sp_model )

    def _UpperCamelCase ( self ):
        # Build a token -> id map covering the SP vocab plus added tokens.
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The SentencePieceProcessor is not picklable; drop it from the state.
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state

    def __setstate__( self , a_ ):
        lowerCamelCase_ : Union[str, Any] = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}

        # Re-create the SentencePiece processor dropped in __getstate__.
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCamelCase ( self , a_ ):
        # Normalize raw text: collapse whitespace, unify quotes, optionally
        # strip accents (NFKD + drop combining marks) and lower-case.
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )

        if not self.keep_accents:
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()

        return outputs

    def _UpperCamelCase ( self , a_ ):
        # Tokenize: SentencePiece-encode the normalized text, then re-split
        # pieces like "9," so trailing digits+comma tokenize consistently.
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces

    def _UpperCamelCase ( self , a_ ):
        # token (str) -> id (int) via SentencePiece.
        return self.sp_model.PieceToId(a_ )

    def _UpperCamelCase ( self , a_ ):
        # id (int) -> token (str) via SentencePiece.
        return self.sp_model.IdToPiece(a_ )

    def _UpperCamelCase ( self , a_ ):
        # Join pieces and turn the SentencePiece underline back into spaces.
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string

    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        # Decode a sequence of ids back to text, handling added tokens
        # separately from SentencePiece tokens.
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )

        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                    lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )

        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Build model inputs: X <sep> <cls>  or  A <sep> B <sep> <cls>
        # (XLNet appends its special tokens at the end).
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        # 1 marks special-token positions, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )

        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Segment ids: sequence A -> 0, sequence B -> 1, trailing <cls> -> 2.
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]

        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Save the SentencePiece model file into ``save_directory``; copies the
        # original file when present, otherwise serializes the loaded model.
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )

        return (out_vocab_file,)
73
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ): lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18} lowerCamelCase_ : str = parent lowerCamelCase_ : str = batch_size lowerCamelCase_ : Tuple = num_channels lowerCamelCase_ : Optional[int] = image_size lowerCamelCase_ : List[str] = min_resolution lowerCamelCase_ : Tuple = max_resolution lowerCamelCase_ : Tuple = do_resize lowerCamelCase_ : Dict = size lowerCamelCase_ : List[str] = apply_ocr def _UpperCamelCase ( self ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self ) @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , "do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "apply_ocr" ) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) 
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , a_ ) self.assertIsInstance(encoding.boxes , a_ ) # Test batched lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" 
).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # with apply_OCR = True lowerCamelCase_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" ) lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", 
"4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 
202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 
434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a_ ) self.assertListEqual(encoding.boxes , a_ ) # with apply_OCR = False lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ ) lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
73
1
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# Mangled module flag of unknown purpose in the original dump; preserved as-is.
__magic_name__ = False


class lowerCAmelCase__(unittest.TestCase):
    """Placeholder for the fast (non-slow) pipeline tests."""

    pass


@slow
@require_torch_gpu
class lowerCAmelCase__(unittest.TestCase):  # noqa: F811 -- intentionally shadows the class above, as in the original
    """Slow integration test for ``VersatileDiffusionImageVariationPipeline``.

    Runs one full image-variation generation on GPU and compares a 3x3 patch
    of the output against precomputed reference values.
    """

    def _UpperCamelCase(self):
        # Fix: the original bound the pipeline to a throwaway name and then
        # referenced the undefined ``pipe`` / ``a_``; bind real names instead.
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        # NOTE(review): ``disable=None`` follows the upstream HF test convention;
        # the mangled original passed an unrecoverable ``a_`` here -- confirm.
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        # Seeded generator makes the diffusion output deterministic.
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        # Compare a small patch of the last channel against reference values.
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): the dump reuses ``__magic_name__`` for both the logger and the
# pretrained-config archive map, so the second assignment shadows the first;
# preserved as-is to avoid changing module-level names other code may reference.
__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}


class lowerCAmelCase__(PretrainedConfig):  # base fixed: ``__lowerCamelCase`` was an undefined name
    """Configuration class for a LUKE (entity-aware transformer) model.

    Holds the word/entity vocabulary sizes and the standard transformer
    hyperparameters consumed by the model at construction time.
    """

    # Model-type identifier (``model_type`` upstream). The original's
    # ``: List[Any]`` annotation is dropped: ``List`` was never imported and
    # class-level annotations are evaluated at class-creation time (NameError).
    __UpperCAmelCase = '''luke'''

    def __init__(
        self,
        vocab_size=5_0267,          # size of the word-piece vocabulary
        entity_vocab_size=50_0000,  # size of the entity vocabulary
        hidden_size=768,
        entity_emb_size=256,        # dimensionality of the entity embeddings
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,  # enable LUKE's entity-aware self-attention
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store the configuration values and forward token ids to the base class.

        Fix: every parameter in the original was named ``a_`` (a duplicate-
        argument SyntaxError introduced by mechanical renaming); the real names
        are recovered from the body's right-hand sides, which the mangler left
        intact, and match the upstream LukeConfig signature.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
73
1
from __future__ import annotations

# Boards for every complete placement found by ``solve``.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen at (row, column) is attacked by no placed queen.

    Checks the full row, the full column and the two upward diagonals; squares
    below ``row`` are always empty because ``solve`` fills the board top-down.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, recording and printing every full solution.

    Fix 1: the original defined this (and its helpers) under the mangled name
    ``__magic_name__`` while call sites used ``solve``/``is_safe``/``printboard``,
    which raised NameError; the canonical names are restored.
    Fix 2: ``solution.append(board)`` stored aliases of the single board, which
    backtracking later zeroes out; a deep copy (row snapshot) is stored instead.
    """
    if row >= len(board):
        solution.append([line[:] for line in board])  # snapshot, not an alias
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
73
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __magic_name__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : Optional[datasets.Features] = None def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , ): '''simple docstring''' import pyspark def generate_fn(): lowerCamelCase_ : Dict = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id")) for partition_id in partition_order: lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id") lowerCamelCase_ : Dict = partition_df.collect() lowerCamelCase_ : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): """simple docstring""" def __init__( self , a_ , a_=None , ): lowerCamelCase_ : Dict = df lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(a_ ) return SparkExamplesIterable(self.df , partition_order=a_ ) def _UpperCamelCase ( self , a_ , a_ ): lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ ) return SparkExamplesIterable(self.df , 
partition_order=a_ ) @property def _UpperCamelCase ( self ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): """simple docstring""" __UpperCAmelCase : Any = SparkConfig def __init__( self , a_ , a_ = None , a_ = None , **a_ , ): import pyspark lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate() lowerCamelCase_ : Optional[Any] = df lowerCamelCase_ : List[Any] = working_dir super().__init__( cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , ) def _UpperCamelCase ( self ): # Returns the path of the created file. def create_cache_and_write_probe(a_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=a_ ) lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(a_ , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowerCamelCase_ : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _UpperCamelCase ( self ): return datasets.DatasetInfo(features=self.config.features ) def _UpperCamelCase ( self , a_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _UpperCamelCase ( self , a_ ): import pyspark def get_arrow_batch_size(a_ ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) lowerCamelCase_ : str = self.df.count() lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCamelCase_ : Any = ( self.df.limit(a_ ) .repartition(1 ) .mapInArrow(a_ , "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) ) lowerCamelCase_ : int = self.df.repartition(a_ ) def _UpperCamelCase ( self , a_ , a_ , a_ , ): import pyspark lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath lowerCamelCase_ : Optional[Any] = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
lowerCamelCase_ : int = self.config.features lowerCamelCase_ : Any = self._writer_batch_size lowerCamelCase_ : Tuple = self._fs.storage_options def write_arrow(a_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId() lowerCamelCase_ : Optional[int] = next(a_ , a_ ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , ) lowerCamelCase_ : List[Any] = 0 lowerCamelCase_ : Optional[int] = writer_class( features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , ) lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(a_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) shard_id += 1 lowerCamelCase_ : List[str] = writer_class( features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , ) lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] ) writer.write_table(a_ ) if writer._num_bytes > 0: lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(a_ ) ): lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) ) shutil.move(a_ , a_ ) lowerCamelCase_ : int = ( 
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ): self._validate_cache_dir() lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(a_ ) lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs ) lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN" lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" lowerCamelCase_ : int = path_join(self._output_dir , a_ ) lowerCamelCase_ : int = 0 lowerCamelCase_ : Optional[Any] = 0 lowerCamelCase_ : int = 0 lowerCamelCase_ : Dict = [] lowerCamelCase_ : Any = [] for task_id, content in self._prepare_split_single(a_ , a_ , a_ ): ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(a_ ) lowerCamelCase_ : Dict = total_num_examples lowerCamelCase_ : Any = total_num_bytes # should rename everything at the end logger.debug(F"""Renaming {total_shards} shards.""" ) if total_shards > 1: lowerCamelCase_ : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling 
the SparkContext. lowerCamelCase_ : Any = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( a_ , a_ , a_ , ): rename( a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , ) lowerCamelCase_ : Optional[int] = [] lowerCamelCase_ : Dict = 0 for i in range(len(a_ ) ): lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i] for shard_id in range(a_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect() else: # don't use any pattern lowerCamelCase_ : int = 0 lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , ) def _UpperCamelCase ( self , a_ , ): return SparkExamplesIterable(self.df )
73
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : int = '''wav2vec2''' def __init__( self , a_=32 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="group" , a_="gelu" , a_=(512, 512, 512, 512, 512, 512, 512) , a_=(5, 2, 2, 2, 2, 2, 2) , a_=(10, 3, 3, 3, 3, 2, 2) , a_=False , a_=128 , a_=16 , a_=False , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_=320 , a_=2 , a_=0.1 , a_=100 , a_=256 , a_=256 , a_=0.1 , a_="sum" , a_=False , a_=False , a_=256 , a_=(512, 512, 512, 512, 1500) , a_=(5, 3, 3, 1, 1) , a_=(1, 2, 3, 1, 1) , a_=512 , a_=0 , a_=1 , a_=2 , a_=False , a_=3 , a_=2 , a_=3 , a_=None , a_=None , **a_ , ): super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ ) lowerCamelCase_ : List[Any] = hidden_size lowerCamelCase_ : List[Any] = feat_extract_norm lowerCamelCase_ : List[Any] = feat_extract_activation lowerCamelCase_ : Optional[Any] = list(a_ ) lowerCamelCase_ : Optional[int] = list(a_ ) lowerCamelCase_ : Any = list(a_ ) lowerCamelCase_ : Union[str, Any] = conv_bias lowerCamelCase_ : int = num_conv_pos_embeddings lowerCamelCase_ : List[str] = num_conv_pos_embedding_groups lowerCamelCase_ : int = len(self.conv_dim ) lowerCamelCase_ : Dict = num_hidden_layers lowerCamelCase_ : str = intermediate_size lowerCamelCase_ : Optional[int] = hidden_act lowerCamelCase_ : Optional[int] = num_attention_heads lowerCamelCase_ : Union[str, Any] = hidden_dropout lowerCamelCase_ : Tuple = attention_dropout lowerCamelCase_ : int = activation_dropout 
lowerCamelCase_ : List[Any] = feat_proj_dropout lowerCamelCase_ : List[str] = final_dropout lowerCamelCase_ : int = layerdrop lowerCamelCase_ : Dict = layer_norm_eps lowerCamelCase_ : List[Any] = initializer_range lowerCamelCase_ : Tuple = vocab_size lowerCamelCase_ : Tuple = do_stable_layer_norm lowerCamelCase_ : Optional[int] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase_ : Optional[Any] = apply_spec_augment lowerCamelCase_ : Union[str, Any] = mask_time_prob lowerCamelCase_ : int = mask_time_length lowerCamelCase_ : Union[str, Any] = mask_time_min_masks lowerCamelCase_ : Union[str, Any] = mask_feature_prob lowerCamelCase_ : Tuple = mask_feature_length lowerCamelCase_ : List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCamelCase_ : int = num_codevectors_per_group lowerCamelCase_ : int = num_codevector_groups lowerCamelCase_ : Any = contrastive_logits_temperature lowerCamelCase_ : Tuple = feat_quantizer_dropout lowerCamelCase_ : Optional[int] = num_negatives lowerCamelCase_ : Dict = codevector_dim lowerCamelCase_ : int = proj_codevector_dim lowerCamelCase_ : List[str] = diversity_loss_weight # ctc loss lowerCamelCase_ : Tuple = ctc_loss_reduction lowerCamelCase_ : str = ctc_zero_infinity # adapter lowerCamelCase_ : Union[str, Any] = add_adapter lowerCamelCase_ : Dict = adapter_kernel_size 
lowerCamelCase_ : Dict = adapter_stride lowerCamelCase_ : Optional[Any] = num_adapter_layers lowerCamelCase_ : Any = output_hidden_size or hidden_size lowerCamelCase_ : str = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCamelCase_ : Union[str, Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCamelCase_ : Optional[int] = list(a_ ) lowerCamelCase_ : str = list(a_ ) lowerCamelCase_ : Optional[Any] = list(a_ ) lowerCamelCase_ : List[str] = xvector_output_dim @property def _UpperCamelCase ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
73
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax every edge leaving ``v`` for one search direction.

    Mutates ``cst_fwd``, ``parent`` and ``queue`` in place, and returns the
    (possibly improved) best known length of a complete source->destination
    path discovered when the two frontiers meet.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # ``nxt`` was already settled by the opposite search: the two
            # frontiers meet here, forming a full candidate path.
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bi-directional Dijkstra: shortest distance from source to destination.

    Runs one Dijkstra from ``source`` on ``graph_forward`` and one from
    ``destination`` on ``graph_backward`` simultaneously.  Returns the
    shortest distance, or -1 when the destination is unreachable.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Stopping criterion: once the two frontiers' radii cover the best
        # meeting point, no shorter path can still be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the initial highway state.

    The highway is a list of rows (one row per simulation step); each cell is
    -1 when empty or the speed (>= 0) of the car occupying it.
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)  # a car cannot have a negative speed
    while i < number_of_cells:
        # Place a car with either a random or the given initial speed.
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed
        # Gap to the next car: fixed frequency or a random spacing.
        i += randint(1, max_speed * 2) if random_frequency else frequency
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Number of cells between the car at ``car_index`` and the next car,
    wrapping around the start of the (circular) highway when needed."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in cells:
        if cell != -1:  # the cell is occupied: next car found
            return distance
        distance += 1
    # No car ahead before the end of the highway: wrap around to the start.
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel-Schreckenberg speed update (positions are NOT moved here).

    Each car accelerates by 1 (capped at ``max_speed``), brakes to avoid the
    car ahead, then randomly slows down with the given ``probability``.
    """
    number_of_cells = len(highway_now)
    # Before calculation, the next highway row is empty.
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Accelerate by 1, capped at the speed limit.
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car.
            dn = get_distance(highway_now, car_index) - 1
            # The car must not crash into the one ahead.
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver slows down.
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    """Run ``number_of_update`` steps, appending each new state to ``highway``."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Move the car by its speed; modulo makes the road circular.
                index = (car_index + speed) % number_of_cells
                # Commit the change of position.
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class lowerCAmelCase__(PretrainedConfig):
    """Configuration class for the Salesforce CTRL model.

    Stores the hyper-parameters needed to instantiate a CTRL model; the
    defaults match the released ``ctrl`` checkpoint.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names used across the library -> CTRL-specific names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,  # size of the CTRL vocabulary
        n_positions=256,  # maximum sequence length
        n_embd=1280,  # embedding / hidden dimension
        dff=8192,  # inner dimension of the feed-forward blocks
        n_layer=48,  # number of transformer layers
        n_head=16,  # attention heads per layer
        resid_pdrop=0.1,  # dropout on residual branches
        embd_pdrop=0.1,  # dropout on embeddings
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,  # whether to return past key/values for fast decoding
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
73
1
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class lowerCAmelCase__(BeitImageProcessor):
    """Deprecated alias of ``BeitImageProcessor`` kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Nudge callers toward the new class name before the v5 removal.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
73
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments about which model/config/tokenizer to fine-tune or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when a config is built from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """Arguments about the data fed to the model for training and evaluation."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Only local csv/json/txt files are supported when not loading from the hub.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference ids (one JSON list per non-empty
    line of ``ref_file``) to ``dataset`` as a ``chinese_ref`` column."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    # Parse CLI args (or a single json file) into the three dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at
    # https://huggingface.co/datasets/ (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # No validation split: carve one out of the head of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame,
    # etc) at https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs).
    main()


if __name__ == "__main__":
    main()
73
1
# Standard acceleration due to gravity on Earth (m/s^2).
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on an object submerged in a fluid: F = rho * g * V.

    :param fluid_density: density of the fluid (must be > 0)
    :param volume: displaced volume (must be >= 0)
    :param gravity: gravitational acceleration (must be > 0), Earth by default
    :raises ValueError: when any argument is outside its physical range
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
73
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Mutable state threaded through the (otherwise stateless) Flax scheduler."""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler step: new sample, ODE derivative, updated state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler (Karras et al. 2022, Algorithm 2), Flax version."""

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All hyper-parameters are captured on self.config by @register_to_config.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Precompute the decreasing timestep sequence and its sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        # Geometric interpolation from sigma_max down to sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: jnp.ndarray,
    ):
        """Churn step: raise the noise level to sigma_hat = sigma + gamma * sigma."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """First-order (Euler) step of the probability-flow ODE."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order (Heun) correction of the preceding `step` call."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
73
1
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    # NOTE: pickle.load can execute arbitrary code -- only load trusted dumps.
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # Dense per-token-id count vector; ids never observed stay at 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
73
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Any = StableDiffusionDiffEditPipeline __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} __UpperCAmelCase : List[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCAmelCase : List[str] = frozenset([] ) def _UpperCamelCase ( self ): torch.manual_seed(0 ) lowerCamelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , ) lowerCamelCase_ : str = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , ) lowerCamelCase_ : Dict = DDIMInverseScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , 
beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , ) torch.manual_seed(0 ) lowerCamelCase_ : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ ) lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCamelCase_ : Optional[Any] = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _UpperCamelCase ( self , a_ , a_=0 ): lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ ) lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ ) if str(a_ ).startswith("mps" ): lowerCamelCase_ : List[Any] = torch.manual_seed(a_ ) else: lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ : Tuple = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _UpperCamelCase ( self , a_ , a_=0 ): lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ) if str(a_ ).startswith("mps" ): 
lowerCamelCase_ : Tuple = torch.manual_seed(a_ ) else: lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ : int = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _UpperCamelCase ( self , a_ , a_=0 ): lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ) if str(a_ ).startswith("mps" ): lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ ) else: lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ : Union[str, Any] = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "numpy", } return inputs def _UpperCamelCase ( self ): if not hasattr(self.pipeline_class , "_optional_components" ): return lowerCamelCase_ : List[Any] = self.get_dummy_components() lowerCamelCase_ : int = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(a_ , a_ , a_ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowerCamelCase_ : int = self.get_dummy_inputs(a_ ) lowerCamelCase_ : int = pipe(**a_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(a_ ) lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ ) pipe_loaded.to(a_ ) pipe_loaded.set_progress_bar_config(disable=a_ ) for optional_component in pipe._optional_components: self.assertTrue( 
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ ) lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0] lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max() self.assertLess(a_ , 1E-4 ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = "cpu" lowerCamelCase_ : int = self.get_dummy_components() lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ ) lowerCamelCase_ : int = pipe.generate_mask(**a_ ) lowerCamelCase_ : List[Any] = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowerCamelCase_ : List[str] = np.array([0] * 9 ) lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_ , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = "cpu" lowerCamelCase_ : Union[str, Any] = self.get_dummy_components() lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ ) lowerCamelCase_ : Dict = pipe.invert(**a_ ).images lowerCamelCase_ : str = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCamelCase_ : Dict = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_ , 1E-3 ) def _UpperCamelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = "cpu" lowerCamelCase_ : int = self.get_dummy_components() lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"} lowerCamelCase_ : 
Optional[Any] = DPMSolverMultistepScheduler(**a_ ) lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ ) lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ ) lowerCamelCase_ : str = pipe.invert(**a_ ).images lowerCamelCase_ : int = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCamelCase_ : Union[str, Any] = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_ , 1E-3 ) @require_torch_gpu @slow class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _UpperCamelCase ( cls ): lowerCamelCase_ : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) ) lowerCamelCase_ : List[Any] = raw_image def _UpperCamelCase ( self ): lowerCamelCase_ : Dict = torch.manual_seed(0 ) lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa ) lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config ) lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : str = "a bowl of fruit" lowerCamelCase_ : Optional[int] = "a bowl of pears" lowerCamelCase_ : List[Any] = pipe.generate_mask( image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , ) lowerCamelCase_ : str = pipe.invert( prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents lowerCamelCase_ 
: List[str] = pipe( prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0] lowerCamelCase_ : List[str] = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 ) lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa ) lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : Any = "a bowl of fruit" lowerCamelCase_ : Dict = "a bowl of pears" lowerCamelCase_ : Optional[Any] = pipe.generate_mask( image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , ) lowerCamelCase_ : str = pipe.invert( prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents lowerCamelCase_ : Any = pipe( prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0] lowerCamelCase_ : List[str] = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
73
1
"""Integration tests for the VersatileDiffusion mega pipeline (dual-guided,
text-to-image and image-variation modes)."""
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


# Unused module-level flag retained from the original file.
__magic_name__ = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    # Placeholder for fast (CPU) tests; none implemented yet.
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up the VRAM after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        """Round-trip save/load must not change the forward pass."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Re-seed so both runs start from identical noise.
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        """Run all three entry points and compare output slices to references."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
73
"""Unit tests for transformers.utils.backbone_utils."""
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to the last layer if both are None.
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features.
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices.
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices.
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order.
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs.
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly.
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly.
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
73
1
from __future__ import annotations def __magic_name__ ( lowerCAmelCase_ = 4): '''simple docstring''' lowerCamelCase_ : Tuple = abs(lowerCAmelCase_) or 4 return [[1 + x + y * row_size for x in range(lowerCAmelCase_)] for y in range(lowerCAmelCase_)] def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' return reverse_row(transpose(lowerCAmelCase_)) # OR.. transpose(reverse_column(matrix)) def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' return reverse_row(reverse_column(lowerCAmelCase_)) # OR.. reverse_column(reverse_row(matrix)) def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' return reverse_column(transpose(lowerCAmelCase_)) # OR.. transpose(reverse_row(matrix)) def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Optional[int] = [list(lowerCAmelCase_) for x in zip(*lowerCAmelCase_)] return matrix def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Optional[Any] = matrix[::-1] return matrix def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Optional[int] = [x[::-1] for x in matrix] return matrix def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' for i in matrix: print(*lowerCAmelCase_) if __name__ == "__main__": __magic_name__ = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 90 counterclockwise:\n''') print_matrix(rotate_aa(matrix)) __magic_name__ = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 180:\n''') print_matrix(rotate_aaa(matrix)) __magic_name__ = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 270 counterclockwise:\n''') print_matrix(rotate_aaa(matrix))
73
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils ) __UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) __UpperCAmelCase : Tuple = ['''accelerate''', '''launch'''] __UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate''' __UpperCAmelCase : int = '''default_config.yaml''' __UpperCAmelCase : Tuple = config_folder / config_file __UpperCAmelCase : int = config_folder / '''_default_config.yaml''' __UpperCAmelCase : int = Path('''tests/test_configs''' ) @classmethod def _UpperCamelCase ( cls ): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def _UpperCamelCase ( cls ): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def _UpperCamelCase ( self ): for config in sorted(self.test_config_path.glob("**/*.yaml" ) ): with self.subTest(config_file=a_ ): execute_subprocess_async( self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() ) def _UpperCamelCase ( self ): execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : List[Any] = '''test-tpu''' __UpperCAmelCase : Tuple = '''us-central1-a''' __UpperCAmelCase : Tuple = '''ls''' __UpperCAmelCase : str = ['''accelerate''', '''tpu-config'''] __UpperCAmelCase : Dict = '''cd /usr/share''' __UpperCAmelCase : Any = 
'''tests/test_samples/test_command_file.sh''' __UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh''' def _UpperCamelCase ( self ): lowerCamelCase_ : Any = run_command( self.cmd + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Union[str, Any] = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--command", "echo \"Hello World\"", "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", 
"--command_file", self.command_file, "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Dict = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command_file", self.command_file, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : str = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--accelerate_version", "12.0.0", "--debug", ] , return_stdout=a_ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
73
1
"""Project Euler Problem 234: Semidivisible numbers.

A number n is semidivisible when exactly one of lps(n) (largest prime
<= sqrt(n)) and ups(n) (smallest prime > sqrt(n)) divides n. For each
consecutive prime pair (p, q) this sums the multiples of p in
(p**2, q**2], the multiples of q in [p**2, q**2), and removes (twice)
the numbers divisible by both, all capped at `limit`.
"""
import math


def prime_sieve(n: int) -> list[int]:
    """Return all primes strictly below n (n >= 3) via an odd-only sieve."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        # Mark every multiple of the odd candidate i, starting at 2*i.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Return the sum of all semidivisible numbers not exceeding `limit`."""
    # Primes up to sqrt(limit) plus slack so the next prime past sqrt exists.
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Add the numbers in (lower_bound, upper_bound) divisible by lps.
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Pull upper_bound back under the limit before walking downwards.
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers in (lower_bound, upper_bound) divisible by ups.
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Numbers divisible by both were added twice above — remove them twice.
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Advance to the first multiple of lps*ups past lower_bound.
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            matches_sum -= current * 2
            current += last_prime * next_prime

        # Set up the next consecutive prime pair.
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
73
"""Unconditional latent-diffusion pipeline: DDIM denoising in VQ-VAE latent
space, then decoding the final latents to images."""
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowerCAmelCase__(DiffusionPipeline):
    """Latent diffusion pipeline for unconditional image generation.

    Args:
        vqvae: VQ-VAE model used to decode latents into images.
        unet: UNet that predicts the noise residual at each step.
        scheduler: DDIM scheduler driving the denoising loop.
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate `batch_size` images; returns an ImagePipelineOutput (or a
        tuple when `return_dict` is False)."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # Scale the initial noise by the standard deviation required by the scheduler.
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # Not all schedulers accept `eta`; only pass it when supported.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # Predict the noise residual.
            noise_prediction = self.unet(latent_model_input, t).sample
            # Compute the previous noisy sample x_t -> x_t-1.
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # Decode the image latents with the VQ-VAE.
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
73
1
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval DataLoaders for GLUE MRPC.

    Args:
        accelerator: the `Accelerator`, used to decide padding strategy and
            to serialize dataset preprocessing across processes.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train and evaluate BERT-base on MRPC under `accelerate`.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace with `cpu` and `mixed_precision`.
    """
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # Average the loss over the accumulated micro-batches.
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # gather_for_metrics drops duplicated samples added by distributed samplers.
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI flags and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
73
import re def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_): raise ValueError("Invalid Strand") return dna.translate(dna.maketrans("ATCG" , "TAGC")) if __name__ == "__main__": import doctest doctest.testmod()
73
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __magic_name__ ( ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png" lowerCamelCase_ : Dict = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_).raw).convert("RGB") return image def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Dict = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding")) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight")) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias")) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight")) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias")) for i in range(config.vision_config.num_hidden_layers): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""")) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""")) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""")) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", 
F"""vision_model.encoder.layers.{i}.layer_norm2.bias""")) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""")) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",)) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""")) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""")) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""")) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""")) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""")) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight")) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias")) # fmt: on return rename_keys def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Any = dct.pop(lowerCAmelCase_) lowerCamelCase_ : Any = val def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' for i in range(config.vision_config.num_hidden_layers): # read in original q and v biases lowerCamelCase_ : str = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""") lowerCamelCase_ : List[str] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""") # next, set bias in the state dict lowerCamelCase_ : List[Any] = torch.cat((q_bias, torch.zeros_like(lowerCAmelCase_ , requires_grad=lowerCAmelCase_), v_bias)) lowerCamelCase_ : int = qkv_bias def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Dict = 
364 if "coco" in model_name else 224 lowerCamelCase_ : Optional[int] = BlipaVisionConfig(image_size=lowerCAmelCase_).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: lowerCamelCase_ : Optional[Any] = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=lowerCAmelCase_).to_dict() elif "opt-6.7b" in model_name: lowerCamelCase_ : Tuple = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=lowerCAmelCase_).to_dict() elif "t5-xl" in model_name: lowerCamelCase_ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1).to_dict() elif "t5-xxl" in model_name: lowerCamelCase_ : str = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1).to_dict() lowerCamelCase_ : Optional[Any] = BlipaConfig(vision_config=lowerCAmelCase_ , text_config=lowerCAmelCase_) return config, image_size @torch.no_grad() def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False): '''simple docstring''' lowerCamelCase_ : Dict = ( AutoTokenizer.from_pretrained("facebook/opt-2.7b") if "opt" in model_name else AutoTokenizer.from_pretrained("google/flan-t5-xl") ) lowerCamelCase_ : int = tokenizer("\n" , add_special_tokens=lowerCAmelCase_).input_ids[0] lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_blipa_config(lowerCAmelCase_ , eos_token_id=lowerCAmelCase_) lowerCamelCase_ : List[Any] = BlipaForConditionalGeneration(lowerCAmelCase_).eval() lowerCamelCase_ : Any = { "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"), "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"), "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"), "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"), "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"), "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"), "blip2-flan-t5-xxl": ("blip2_t5", 
"pretrain_flant5xxl"), } lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = model_name_to_original[model_name] # load original model print("Loading original model...") lowerCamelCase_ : List[Any] = "cuda" if torch.cuda.is_available() else "cpu" lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[Any] = load_model_and_preprocess( name=lowerCAmelCase_ , model_type=lowerCAmelCase_ , is_eval=lowerCAmelCase_ , device=lowerCAmelCase_) original_model.eval() print("Done!") # update state dict keys lowerCamelCase_ : Optional[Any] = original_model.state_dict() lowerCamelCase_ : Optional[int] = create_rename_keys(lowerCAmelCase_) for src, dest in rename_keys: rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowerCamelCase_ : List[str] = state_dict.pop(lowerCAmelCase_) if key.startswith("Qformer.bert"): lowerCamelCase_ : List[Any] = key.replace("Qformer.bert" , "qformer") if "attention.self" in key: lowerCamelCase_ : Optional[Any] = key.replace("self" , "attention") if "opt_proj" in key: lowerCamelCase_ : str = key.replace("opt_proj" , "language_projection") if "t5_proj" in key: lowerCamelCase_ : Union[str, Any] = key.replace("t5_proj" , "language_projection") if key.startswith("opt"): lowerCamelCase_ : str = key.replace("opt" , "language") if key.startswith("t5"): lowerCamelCase_ : Union[str, Any] = key.replace("t5" , "language") lowerCamelCase_ : Tuple = val # read in qv biases read_in_q_v_bias(lowerCAmelCase_ , lowerCAmelCase_) lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = hf_model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_) assert len(lowerCAmelCase_) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] lowerCamelCase_ : int = load_demo_image() lowerCamelCase_ : Union[str, Any] = vis_processors["eval"](lowerCAmelCase_).unsqueeze(0).to(lowerCAmelCase_) lowerCamelCase_ : int = tokenizer(["\n"] , 
return_tensors="pt").input_ids.to(lowerCAmelCase_) # create processor lowerCamelCase_ : str = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_) lowerCamelCase_ : int = BlipaProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_) lowerCamelCase_ : Dict = processor(images=lowerCAmelCase_ , return_tensors="pt").pixel_values.to(lowerCAmelCase_) # make sure processor creates exact same pixel values assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_) original_model.to(lowerCAmelCase_) hf_model.to(lowerCAmelCase_) with torch.no_grad(): if "opt" in model_name: lowerCamelCase_ : List[str] = original_model({"image": original_pixel_values, "text_input": [""]}).logits lowerCamelCase_ : str = hf_model(lowerCAmelCase_ , lowerCAmelCase_).logits else: lowerCamelCase_ : Union[str, Any] = original_model( {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits lowerCamelCase_ : Any = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100) lowerCamelCase_ : str = hf_model(lowerCAmelCase_ , lowerCAmelCase_ , labels=lowerCAmelCase_).logits assert original_logits.shape == logits.shape print("First values of original logits:" , original_logits[0, :3, :3]) print("First values of HF logits:" , logits[0, :3, :3]) # assert values if model_name == "blip2-flan-t5-xl": lowerCamelCase_ : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=lowerCAmelCase_) assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1E-4) elif model_name == "blip2-flan-t5-xl-coco": lowerCamelCase_ : int = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=lowerCAmelCase_) else: # cast to same type lowerCamelCase_ : Optional[int] = logits.dtype assert torch.allclose(original_logits.to(lowerCAmelCase_) , lowerCAmelCase_ , atol=1E-2) print("Looks ok!") print("Generating a caption...") 
lowerCamelCase_ : str = "" lowerCamelCase_ : Optional[Any] = tokenizer(lowerCAmelCase_ , return_tensors="pt").input_ids.to(lowerCAmelCase_) lowerCamelCase_ : Tuple = original_model.generate({"image": original_pixel_values}) lowerCamelCase_ : Union[str, Any] = hf_model.generate( lowerCAmelCase_ , lowerCAmelCase_ , do_sample=lowerCAmelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("Original generation:" , lowerCAmelCase_) lowerCamelCase_ : Dict = input_ids.shape[1] lowerCamelCase_ : Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCAmelCase_) lowerCamelCase_ : Any = [text.strip() for text in output_text] print("HF generation:" , lowerCAmelCase_) if pytorch_dump_folder_path is not None: processor.save_pretrained(lowerCAmelCase_) hf_model.save_pretrained(lowerCAmelCase_) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""") hf_model.push_to_hub(F"""nielsr/{model_name}""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() __magic_name__ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) __magic_name__ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
73
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False): '''simple docstring''' if radian_mode: return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)] return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))] def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1): '''simple docstring''' lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_) lowerCamelCase_ : float = sum(lowerCAmelCase_) return abs(lowerCAmelCase_) < eps if __name__ == "__main__": # Test to check if it works __magic_name__ = array( [ polar_force(7_18.4, 1_8_0 - 3_0), polar_force(8_79.54, 4_5), polar_force(1_0_0, -9_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __magic_name__ = array( [ polar_force(3_0 * 9.81, 1_5), polar_force(2_1_5, 1_8_0 - 4_5), polar_force(2_6_4, 9_0 - 3_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]]) __magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
73
1
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Find the dominant eigenpair of a symmetric/Hermitian matrix.

    Args:
        input_matrix: square (n, n) matrix; Hermitian when complex.
        vector: initial (n,) guess, must not be orthogonal to the dominant
            eigenvector.
        error_tol: relative change in eigenvalue at which to stop.
        max_iterations: hard cap on iterations.

    Returns:
        (largest eigenvalue, corresponding unit eigenvector)
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy.linalg.eigh on real and complex cases."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complexaaa)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    # Build a Hermitian matrix: add i*upper triangle and subtract its transpose.
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complexaaa)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
73
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class lowerCAmelCase__(ProcessorMixin):
    """
    Combined CLAP processor: wraps a `ClapFeatureExtractor` (audio) and a
    Roberta tokenizer (text) behind a single `__call__`.
    """

    # Names ProcessorMixin uses to locate and instantiate the wrapped components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Encode text and/or audio inputs.

        Args:
            text: string(s) to tokenize, or None.
            audios: raw audio input(s) for the feature extractor, or None.
            return_tensors: framework for the returned tensors ("pt", "np", ...).
            **kwargs: forwarded to the tokenizer / feature extractor;
                "sampling_rate" is popped and passed to the feature extractor.

        Raises:
            ValueError: if both ``text`` and ``audios`` are None.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge audio features into the text encoding so callers get one mapping.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, deduplicated, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
73
1
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_ , a_ ): lowerCamelCase_ : Dict = dataset lowerCamelCase_ : Any = process lowerCamelCase_ : List[Any] = params def __len__( self ): return len(self.dataset ) def __getitem__( self , a_ ): lowerCamelCase_ : Optional[Any] = self.dataset[i] lowerCamelCase_ : List[str] = self.process(a_ , **self.params ) return processed class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_ , a_ , a_=None ): lowerCamelCase_ : int = loader lowerCamelCase_ : Optional[int] = infer lowerCamelCase_ : Union[str, Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase_ : Optional[int] = None lowerCamelCase_ : List[Any] = loader_batch_size # Internal bookkeeping lowerCamelCase_ : str = None lowerCamelCase_ : List[Any] = None def __len__( self ): return len(self.loader ) def __iter__( self ): lowerCamelCase_ : Any = iter(self.loader ) return self def _UpperCamelCase ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase_ : Union[str, Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase_ : Optional[int] = {} for k, element in self._loader_batch_data.items(): if isinstance(a_ , a_ ): # Convert ModelOutput to tuple first lowerCamelCase_ : Union[str, Any] = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase_ : Optional[int] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase_ : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", 
"attentions"} and isinstance(a_ , a_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase_ : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase_ : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase_ : Dict = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase_ : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase_ : Any = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowerCamelCase_ : Optional[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase_ : List[Any] = self._loader_batch_data.__class__(a_ ) self._loader_batch_index += 1 return result def _UpperCamelCase ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase_ : str = next(self.iterator ) lowerCamelCase_ : Optional[Any] = self.infer(a_ , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(a_ , torch.Tensor ): lowerCamelCase_ : Union[str, Any] = processed else: lowerCamelCase_ : str = list(processed.keys() )[0] lowerCamelCase_ : Any = processed[key] if isinstance(a_ , a_ ): lowerCamelCase_ : Tuple = len(a_ ) else: lowerCamelCase_ : Optional[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase_ : List[str] = observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase_ : Any = processed lowerCamelCase_ : Dict = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_ , a_ , a_=None ): super().__init__(a_ , a_ , a_ ) def __iter__( self ): lowerCamelCase_ : Optional[int] = iter(self.loader ) lowerCamelCase_ : List[str] = None return self def _UpperCamelCase ( self ): if self.subiterator is None: lowerCamelCase_ : Any = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCamelCase_ : str = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase_ : Tuple = self.infer(next(self.iterator ) , **self.params ) lowerCamelCase_ : Dict = next(self.subiterator ) return processed class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __iter__( self ): lowerCamelCase_ : Dict = iter(self.loader ) return self def _UpperCamelCase ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase_ : Tuple = False lowerCamelCase_ : Union[str, Any] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase_ : Tuple = self.loader_batch_item() lowerCamelCase_ : Tuple = item.pop("is_last" ) accumulator.append(a_ ) if is_last: return accumulator while not is_last: lowerCamelCase_ : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(a_ , torch.Tensor ): lowerCamelCase_ : List[str] = processed else: lowerCamelCase_ : Union[str, Any] = list(processed.keys() )[0] lowerCamelCase_ : Any = processed[key] if isinstance(a_ , a_ ): lowerCamelCase_ : List[str] = len(a_ ) else: lowerCamelCase_ : int = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
lowerCamelCase_ : str = observed_batch_size lowerCamelCase_ : Optional[int] = processed lowerCamelCase_ : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase_ : Union[str, Any] = self.loader_batch_item() lowerCamelCase_ : Tuple = item.pop("is_last" ) accumulator.append(a_ ) if is_last: return accumulator else: lowerCamelCase_ : Any = processed lowerCamelCase_ : Dict = item.pop("is_last" ) accumulator.append(a_ ) return accumulator class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_ ): lowerCamelCase_ : int = dataset lowerCamelCase_ : Dict = key def __len__( self ): return len(self.dataset ) def __getitem__( self , a_ ): return self.dataset[i][self.key] class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_ , a_ ): lowerCamelCase_ : Optional[Any] = dataset lowerCamelCase_ : Optional[int] = keya lowerCamelCase_ : str = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , a_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
73
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` contains every letter a-z at least once.

    Collects lowercase ASCII letters into a set and checks its size.
    """
    frequency = set()
    for alpha in input_str:
        # Only count ASCII letters; everything else (digits, punctuation) is ignored.
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check using a fixed 26-slot flag table instead of a set."""
    flag = [False] * 26
    for char in input_str:
        # Guard with an explicit ASCII range so accented letters (for which
        # islower()/isupper() are also True) cannot index out of bounds.
        if "a" <= char <= "z":
            flag[ord(char) - ord("a")] = True
        elif "A" <= char <= "Z":
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """One-liner pangram check via a set comprehension over alphabetic chars."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations against each other with timeit."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
73
1
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    """Convert a temporal U-Net checkpoint (horizon ``hor``) to diffusers format.

    Loads the original torch module, remaps its state-dict keys onto a freshly
    built ``UNet1DModel`` by position, and writes weights + config to ``hub/``.
    """
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    else:
        # Previously an unsupported horizon fell through to a NameError.
        raise ValueError(f"Unsupported horizon: {hor} (expected 32 or 128)")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()

    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    # Key mapping is positional: both dicts are assumed to enumerate layers in
    # the same order — TODO confirm this holds for the source checkpoint.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    """Convert the hopper-medium-v2 value-function checkpoint to diffusers format."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    # This checkpoint stores the raw state dict directly (no wrapping module).
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
73
# Conversion factors to joules for each supported energy unit.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between energy units via the joule as pivot.

    Raises:
        ValueError: if either unit name is not in ``ENERGY_CONVERSION``.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    """Unit tests for the summarization data-preparation helpers."""

    def setUp(self):
        # Block size shared by all truncate/pad tests.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Sequences shorter than the block size are padded with the pad value."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Sequences of exactly the block size are returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Sequences longer than the block size are truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no @highlight markers yields no summary lines."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lists."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        """Story body and @highlight summary are split and sentence-terminated."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        """No pad token present -> mask is all ones."""
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        """Trailing pad tokens (23) are masked out."""
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        """Pad id 1 inside the sequence body must not be confused with padding."""
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        """Segment ids flip at each separator token (101)."""
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
73
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLNet; pads on the left."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        # XLNet uses segment id 3 for padding positions.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before tokenizing."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            # Strip combining marks after NFKD decomposition.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting digit+comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # Re-encode "1234," without the trailing comma so numbers split cleanly.
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A, 1 for B, 2 for the trailing <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. restored from pickle) — serialize the model proto.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
73
1
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    """Operation tuple for ``obj[k]``."""
    return getitem, k


def _set(k, v):
    """Operation tuple for ``obj[k] = v``."""
    return setitem, k, v


def _del(k):
    """Operation tuple for ``del obj[k]``."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)`` and return ``(result, exception)``."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay the same operations on HashMap and dict; results must agree."""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_are_added_to_api():
    """HashMap must not expose public names beyond dict's public API."""

    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
73
def bound_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is truthy, else ``max_val``.

    Validates that all arguments are the expected types and that
    ``min_val <= max_val``.
    """
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Integer midpoint of two numbers (truncated toward zero)."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search demonstration that 'guesses' ``to_guess`` in (lower, higher).

    Prints each probe and the final guess.

    Raises:
        ValueError: if the bounds are inverted or ``to_guess`` is out of range.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        # Oracle: is the probe above, below, or equal to the target?
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Prompt for bounds and a target, then run the guessing demo."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
73
1
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of :class:`GLPNImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn callers to migrate before the class is removed in v5.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Per-stage hyperparameters are given as length-3 lists (one entry per stage).
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
73
1
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# (cases, deaths, recovered) as scraped from worldometers.
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the three main counters from the worldometers coronavirus page."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
73
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: the processor is always available; the tokenizers are
# registered only when their optional backends are installed.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
73
1
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    """Bundles an Encodec feature extractor with a T5 tokenizer.

    NOTE(review): class name inferred from the declared feature-extractor /
    tokenizer classes — confirm against the module's original name.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as audio.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge audio features into the tokenizer output.
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode either generated audio (via ``_decode_audio``) or token ids."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        """Strip padding from generated audio using the feature extractor's mask."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
73
1
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    """Combines an auto image processor and an auto tokenizer into one processor.

    NOTE(review): class name inferred from the AutoImageProcessor/AutoTokenizer
    pairing and the ``pixel_values`` merge — confirm against the original module.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``; at least one is required."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Attach the image features to the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
73
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the fractional part of ``number``.

    When ``digit_amount`` > 0, round the fraction to that many digits;
    otherwise return the raw fractional part (sign preserved).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
73
1
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: primes strictly below ``max_number`` (>= 2)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Mark multiples starting at i^2 (smaller ones already marked).
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p^q * q^p (p < q prime) not exceeding base^degree.

    Works in log2 space: p^q * q^p <= base^degree
    iff q*log2(p) + p*log2(q) <= degree*log2(base).
    Uses a two-pointer sweep over the sorted primes.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink the right bound until the pair (left, right) fits the limit.
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
73
# Restored from an obfuscated form: duplicate `a_` parameter names were
# SyntaxErrors, and the tester class was defined under a placeholder name
# while being referenced as LayoutLMv3ImageProcessingTester.
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    """Builds the config/inputs used by the LayoutLMv3 image-processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for LayoutLMv3ImageProcessor (resizing + OCR outputs)."""

    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
73
1
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1.

    Args:
        p: exponent of the Mersenne number; must be >= 2.

    Returns:
        True iff 2**p - 1 is prime (assuming p itself is prime).

    Raises:
        ValueError: if p < 2.

    Fixes: the original defined ``__magic_name__`` while the main guard
    called ``lucas_lehmer_test`` (NameError).
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True  # 2**2 - 1 = 3 is prime

    s = 4
    m = (1 << p) - 1
    # iterate s -> s^2 - 2 (mod M_p) exactly p - 2 times
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    """
    Configuration for the LUKE model.

    Restored from an obfuscated form in which duplicate `a_` parameter names
    were a SyntaxError and the logger/archive map shared one placeholder name.
    Defaults mirror the studio-ousia/luke-base architecture.
    """

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig; special-token ids are forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
73
1
# Restored from an obfuscated form: every function was named `__magic_name__`
# (each definition shadowing the previous one) while call sites used the real
# names below, and duplicate `lowerCAmelCase_` parameters were SyntaxErrors.
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# NOTE(review): the obfuscated source only shows `= "true"`; upstream sets
# TOKENIZERS_PARALLELISM — confirm against the original script.
os.environ["TOKENIZERS_PARALLELISM"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return a plain model, an accelerator-prepared copy, and a prepared dataloader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator, use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # pad dynamically or to a fixed length, depending on the dispatch mode
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    """Prepare baseline ("no") and distributed ("ddp") model/dataloader pairs."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    """Run inference over the dataloader, gathering logits/targets for metrics."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check gather_for_metrics yields exactly `num_samples` predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches=False, split_batches=False):
    """Compare baseline (single-process) and distributed metric results on MRPC."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
73
# Restored from an obfuscated form: `uuid.uuida()` (AttributeError) is now
# uuid.uuid4(), `shutil` (used in _prepare_split_single) is imported, and the
# placeholder class/method names were replaced with the names used at call
# sites (SparkConfig, SparkExamplesIterable, _generate_iterable_examples).
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df, partition_order):
    """Return a generator factory yielding (key, example) pairs partition by partition."""
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # key encodes both partition and row so keys stay unique/stable
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable backed by a Spark DataFrame; one shard per partition."""

    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        """Shuffle the partition visiting order with the provided RNG."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        """Keep only the partitions assigned to this worker."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    """Dataset builder that materializes a Spark DataFrame into Arrow/Parquet shards."""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """Ensure cache_dir is reachable from workers on multi-node clusters."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition self.df so each partition is roughly <= max_shard_size bytes."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        """Write shards on the executors; yield per-task (examples, bytes, shards, lengths) stats."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # finalize the current shard and roll over to the next one
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                # move shards from the executor-local working dir to the final location
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        """Drive the distributed write and rename shards to the -SSSSS-of-NNNNN pattern."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
73
1
# Restored from an obfuscated form: _DESCRIPTION/_CITATION/_KWARGS_DESCRIPTION
# were all assigned to one placeholder name while the decorator referenced the
# real names (NameError), and duplicate `a_` parameters were SyntaxErrors.
import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    """Mahalanobis distance of each point in X to a reference distribution."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse for singular covariance matrices
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
73
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax all edges out of `v` in one search direction.

    Updates `cst_fwd`, `parent` and `queue` in place; returns the (possibly
    improved) length of the best forward+backward path seen so far.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the frontier of the opposite search already reached `nxt`,
        # the two half-paths can be joined into a candidate shortest path.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path, or -1 if
        no path exists between `source` and `destination`.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # The two frontiers have crossed: no shorter path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any `k` consecutive elements of `array`.

    Uses a sliding window: the first window is summed once, then each step
    drops the leftmost element and adds the next one, so the whole scan is O(n).

    Raises:
        ValueError: if `k` is negative or larger than the array.
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    # Both the running window sum and the best seen so far start at the first window.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    # NOTE: k may exceed len(array), in which case the demo raises ValueError by design.
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """
    Configuration class for a CTRL model.

    Instantiating a configuration with the defaults yields a configuration
    similar to the original `ctrl` architecture. All arguments are forwarded
    to :class:`PretrainedConfig` via ``**kwargs``.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic attribute names used across the library to CTRL's own names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        # Args:
        #   vocab_size: size of the token vocabulary.
        #   n_positions: maximum sequence length the model can attend to.
        #   n_embd: hidden size; dff: inner feed-forward dimension.
        #   n_layer / n_head: transformer depth and attention heads.
        #   resid_pdrop / embd_pdrop: dropout probabilities.
        #   layer_norm_epsilon / initializer_range: numerical/init hyperparameters.
        #   use_cache: whether to return past key/values for fast decoding.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
73
1
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images matching `class_prompt`.

    Queries the LAION-400M KNN service, growing the requested result count by
    1.5x until enough candidates are returned, then downloads images into
    ``{class_data_dir}/images`` and records captions/urls/paths in text files.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Skip the download entirely if a previous run already fetched enough images.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        # Stop growing once we have enough candidates, or cap the request size at 10k.
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload is a decodable image before saving.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any candidate that fails to fetch or decode.
                continue
    return


def parse_args():
    """Parse --class_prompt, --class_data_dir and --num_class_images."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
73
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when the config is built from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    """Attach a `chinese_ref` column (one JSON-decoded line of `ref_file` per example) to `dataset`."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    """Run whole-word-mask MLM fine-tuning end to end (argument parsing through evaluation)."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # No validation split: carve one out of the head of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
73
1
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length bit strings that differ in at most one position.

    Returns the merged string with '_' at the differing position, or False
    when the strings differ in more than one position.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine terms until no more merges happen; return the prime implicants.

    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        # '$' marks terms that took part in no comparison round this pass.
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a bit string of length `no_of_variable`.

    >>> decimal_to_binary(3, [1.5])
    ['0.00.01.5']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart.

    First takes implicants that are the sole cover of some minterm column,
    then greedily picks the implicant covering the most remaining columns.

    >>> selection([[1]], ['0.00.01.5'])
    ['0.00.01.5']
    """
    temp = []
    select = [0] * len(chart)
    # Phase 1: columns covered by exactly one row make that row essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Phase 2: greedily take the row covering the most still-uncovered columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j.

    >>> prime_implicant_chart(['0.00.01.5'], ['0.00.01.5'])
    [[1]]
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
73
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Mutable scheduler state carried between functional calls."""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler step: the previous sample, its derivative, and the updated state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2],
    implemented functionally for Flax/JAX.

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations."
    """

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All hyperparameters are captured by @register_to_config into self.config;
        # nothing else to initialize.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Return a new state carrying the discrete timesteps and the sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        # Geometric interpolation between sigma_max and sigma_min over the timesteps.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        """Explicit Langevin-like "churn": raise the noise level from sigma to sigma_hat
        by adding a scaled gaussian perturbation, when sigma lies in [s_min, s_max]."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """First-order (Euler) update from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order (Heun) correction of the network's model_output."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
73
1
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline. Accepts a video path or URL and returns the
    top-k class labels with scores.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # decord is required to decode the video frames.
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        # Route user kwargs to the preprocess / postprocess stages.
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        """Classify the video(s) given as path(s) or URL(s)."""
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # Sample `num_frames` evenly spaced frame indices at the requested rate.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        # Clamp top_k to the number of labels the model actually has.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
73
# Test module for StableDiffusionDiffEditPipeline (fast CPU tests + slow GPU tests).
# NOTE(review): identifiers in this chunk are machine-obfuscated. In particular:
#   - every test method is named `_UpperCamelCase`, so later definitions shadow
#     earlier ones and unittest will not discover them as tests;
#   - `__lowerCamelCase` (base classes), `a_`, and most local names referenced in
#     method bodies are undefined -- TODO restore the original identifiers;
#   - `def _UpperCamelCase ( self , a_ , a_=0 )` has a duplicate parameter name,
#     which is a SyntaxError in Python.
# Code tokens are preserved unchanged below; only comments were added.
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


# Make torch ops deterministic so the hard-coded expected slices below are stable.
enable_full_determinism()


class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """simple docstring"""

    # Pipeline class under test plus the parameter sets exercised by the common mixins.
    __UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
    __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    __UpperCAmelCase : List[Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __UpperCAmelCase : List[str] = frozenset([] )

    # Builds tiny randomly-initialized components so the pipeline runs quickly on CPU.
    # NOTE(review): `a_` and the names in the returned dict are undefined here.
    def _UpperCamelCase ( self ):
        torch.manual_seed(0 )
        lowerCamelCase_ : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
        lowerCamelCase_ : str = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
        lowerCamelCase_ : Dict = DDIMInverseScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
        torch.manual_seed(0 )
        lowerCamelCase_ : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
        lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCamelCase_ : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    # Inputs for the full edit call (prompt + mask + inverted latents).
    # NOTE(review): duplicate parameter name `a_` below -- SyntaxError as written.
    def _UpperCamelCase ( self , a_ , a_=0 ):
        lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        if str(a_ ).startswith("mps" ):
            # mps does not support device-bound generators.
            lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Tuple = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    # Inputs for `generate_mask` (source/target prompt pair).
    def _UpperCamelCase ( self , a_ , a_=0 ):
        lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : int = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    # Inputs for `invert` (DDIM inversion of the image into latents).
    def _UpperCamelCase ( self , a_ , a_=0 ):
        lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Union[str, Any] = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    # Verifies optional components stay None across save_pretrained/from_pretrained
    # and that outputs match before/after the round-trip.
    def _UpperCamelCase ( self ):
        if not hasattr(self.pipeline_class , "_optional_components" ):
            return
        lowerCamelCase_ : List[Any] = self.get_dummy_components()
        lowerCamelCase_ : int = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a_ , a_ , a_ )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : int = pipe(**a_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a_ )
            lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
            pipe_loaded.to(a_ )
            pipe_loaded.set_progress_bar_config(disable=a_ )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
        lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
        self.assertLess(a_ , 1E-4 )

    # Checks generate_mask output shape and that the bottom-right slice is all zeros.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
        lowerCamelCase_ : int = pipe.generate_mask(**a_ )
        lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        lowerCamelCase_ : List[str] = np.array([0] * 9 )
        lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    # Checks DDIM inversion against a hard-coded expected pixel slice.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
        lowerCamelCase_ : str = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Dict = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )

    # Loosens the common mixin's batch-vs-single tolerance for this pipeline.
    def _UpperCamelCase ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=5E-3 )

    # Same inversion check but with the DPMSolver multistep scheduler pair.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
        lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
        lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : str = pipe.invert(**a_ ).images
        lowerCamelCase_ : int = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Union[str, Any] = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )


# End-to-end tests against the real stable-diffusion-2-1 checkpoint; GPU only.
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): the three instance methods below also all share the name
    # `_UpperCamelCase`, so only the last one is actually defined on the class.
    def _UpperCamelCase ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Downloads and caches the fixture image once for the whole class.
    @classmethod
    def _UpperCamelCase ( cls ):
        lowerCamelCase_ : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
        lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
        lowerCamelCase_ : List[Any] = raw_image

    # Full DiffEdit round-trip (mask -> invert -> edit) with DDIM schedulers.
    # NOTE(review): `torch.floataa` below looks like a mangled `torch.float16` -- confirm.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Dict = torch.manual_seed(0 )
        lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : str = "a bowl of fruit"
        lowerCamelCase_ : Optional[int] = "a bowl of pears"
        lowerCamelCase_ : List[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
        lowerCamelCase_ : List[str] = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        # Coarse tolerance: only checks the edit is in the right ballpark.
        assert np.abs((expected_image - image).max() ) < 5E-1

    # Same round-trip with DPMSolver multistep schedulers and 25 steps.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
        lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = "a bowl of fruit"
        lowerCamelCase_ : Dict = "a bowl of pears"
        lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
        lowerCamelCase_ : Any = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
73
1
# pytest session fixtures building small sample files (text, compressed archives,
# xml, arrow, sqlite) for the `datasets` test suite.
# NOTE(review): identifiers are machine-obfuscated. Every fixture is named
# `__magic_name__` (later definitions shadow earlier ones), parameters named
# `lowerCAmelCase_` are duplicated where a fixture takes two+ arguments (a
# SyntaxError as written), and bodies reference undefined names (`n`,
# `tmp_path_factory`, `dataset`, `filename`, `path`, ...).
# Module names like `sqlitea`, `bza`, `lza`, `pyazr` look like mangled
# `sqlite3`, `bz2`, `lz4`, `py7zr` -- TODO confirm against upstream.
# Code tokens are preserved unchanged; only comments were added.
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


# In-memory dataset with tokens/labels/answers/id columns.
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''simple docstring'''
    lowerCamelCase_ : Tuple = 10
    lowerCamelCase_ : Union[str, Any] = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }),
            "id": datasets.Value("int64"),
        })
    lowerCamelCase_ : List[str] = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(lowerCAmelCase_)),
        } , features=lowerCAmelCase_ , )
    return dataset


# Arrow cache file produced from the dataset fixture above.
# NOTE(review): duplicate parameter name -- SyntaxError as written.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=lowerCAmelCase_)
    return filename


# FILE_CONTENT + files
__magic_name__ = '''\
Text data.
Second line of data.'''


# Plain text file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "file.txt"
    lowerCamelCase_ : List[str] = FILE_CONTENT
    with open(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_)
    return filename


# bz2-compressed text file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    import bza

    lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    lowerCamelCase_ : Dict = bytes(lowerCAmelCase_ , "utf-8")
    with bza.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path


# gzip-compressed text file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    import gzip

    lowerCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    lowerCamelCase_ : int = bytes(lowerCAmelCase_ , "utf-8")
    with gzip.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path


# lz4-compressed text file (only when the optional dependency is available).
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame

    lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "file.txt.lz4"
    lowerCamelCase_ : int = bytes(lowerCAmelCase_ , "utf-8")
    with lza.frame.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path


# 7z archive containing the text file (optional dependency).
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr

    lowerCamelCase_ : List[Any] = tmp_path_factory.mktemp("data") / "file.txt.7z"
    with pyazr.SevenZipFile(lowerCAmelCase_ , "w") as archive:
        archive.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path


# tar archive containing the text file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    import tarfile

    lowerCamelCase_ : int = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(lowerCAmelCase_ , "w") as f:
        f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path


# xz (lzma) compressed text file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    import lzma

    lowerCamelCase_ : Any = tmp_path_factory.mktemp("data") / "file.txt.xz"
    lowerCamelCase_ : int = bytes(lowerCAmelCase_ , "utf-8")
    with lzma.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path


# zip archive containing the text file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    import zipfile

    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path


# zstandard-compressed text file (optional dependency).
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

    lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "file.txt.zst"
    lowerCamelCase_ : Optional[int] = bytes(lowerCAmelCase_ , "utf-8")
    with zstd.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path


# Small TMX translation-memory xml file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "file.xml"
    lowerCamelCase_ : Any = textwrap.dedent(
        "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>")
    with open(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_)
    return filename


# Shared row data used by the csv/json/parquet/sql fixtures below.
__magic_name__ = [
    {'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__magic_name__ = [
    {'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
    {'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__magic_name__ = {
    '''col_1''': ['''0''', '''1''', '''2''', '''3'''],
    '''col_2''': [0, 1, 2, 3],
    '''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__magic_name__ = [
    {'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
    {'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__magic_name__ = [
    {'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]


# Exposes the dict-of-lists data as a fixture.
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''simple docstring'''
    return DATA_DICT_OF_LISTS


# Arrow cache file built from the dict-of-lists data.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[Any] = datasets.Dataset.from_dict(lowerCAmelCase_)
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=lowerCAmelCase_)
    return path


# SQLite database with one `dataset` table filled from DATA.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlitea.connect(lowerCAmelCase_)) as con:
        lowerCamelCase_ : str = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values()))
        con.commit()
    return path
# Continuation of the `datasets` conftest fixtures: csv/parquet/json/jsonl/text
# files plus zip/tar/gz variants, image/audio paths, and a data_dir tree.
# NOTE(review): same obfuscation damage as above -- all fixtures named
# `__magic_name__`, duplicated `lowerCAmelCase_` parameters (SyntaxError where
# a fixture takes 2+ args), and undefined local names (`path`, `writer`, ...).
# Code tokens preserved unchanged; only comments were added.
# CSV file with DATA rows.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(lowerCAmelCase_ , "w" , newline="") as f:
        lowerCamelCase_ : str = csv.DictWriter(lowerCAmelCase_ , fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowerCAmelCase_)
    return path


# Second CSV file with the same rows (for multi-file tests).
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(lowerCAmelCase_ , "w" , newline="") as f:
        lowerCamelCase_ : int = csv.DictWriter(lowerCAmelCase_ , fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowerCAmelCase_)
    return path


# bz2-compressed copy of the CSV file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    import bza

    lowerCamelCase_ : Tuple = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(lowerCAmelCase_ , "rb") as f:
        lowerCamelCase_ : Tuple = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path


# zip archive with both CSV files.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Union[str, Any] = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path


# zip archive whose members use an upper-case .CSV extension.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : int = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV")))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV")))
    return path


# zip archive with the CSV files nested under main_dir/.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Any = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
    return path


# Parquet file with the DATA rows.
# NOTE(review): `pa.intaa` / `pa.floataa` look like mangled `pa.int64` /
# `pa.float64` -- not real pyarrow attributes as written; confirm upstream.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    lowerCamelCase_ : Optional[Any] = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.intaa(),
            "col_3": pa.floataa(),
        })
    with open(lowerCAmelCase_ , "wb") as f:
        lowerCamelCase_ : Any = pq.ParquetWriter(lowerCAmelCase_ , schema=lowerCAmelCase_)
        lowerCamelCase_ : Dict = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCAmelCase_))] for k in DATA[0]} , schema=lowerCAmelCase_)
        writer.write_table(lowerCAmelCase_)
        writer.close()
    return path


# JSON file wrapping DATA under a "data" key.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp("data") / "dataset.json")
    lowerCamelCase_ : Optional[Any] = {"data": DATA}
    with open(lowerCAmelCase_ , "w") as f:
        json.dump(lowerCAmelCase_ , lowerCAmelCase_)
    return path


# JSON file wrapping DATA_DICT_OF_LISTS under a "data" key.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : List[str] = str(tmp_path_factory.mktemp("data") / "dataset.json")
    lowerCamelCase_ : List[Any] = {"data": DATA_DICT_OF_LISTS}
    with open(lowerCAmelCase_ , "w") as f:
        json.dump(lowerCAmelCase_ , lowerCAmelCase_)
    return path


# JSON-lines file (one DATA row per line).
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path


# Second JSON-lines file with the same rows.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path


# JSON-lines file with the column order of DATA_312.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA_312:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path


# JSON-lines file with string-prefixed col_1 values (DATA_STR).
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path


# gzip-compressed copy of the text dataset.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    import gzip

    lowerCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(lowerCAmelCase_ , "rb") as orig_file:
        with gzip.open(lowerCAmelCase_ , "wb") as zipped_file:
            zipped_file.writelines(lowerCAmelCase_)
    return path


# gzip-compressed copy of the jsonl dataset.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    import gzip

    lowerCamelCase_ : List[str] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(lowerCAmelCase_ , "rb") as orig_file:
        with gzip.open(lowerCAmelCase_ , "wb") as zipped_file:
            zipped_file.writelines(lowerCAmelCase_)
    return path


# zip archive with both jsonl files at the root.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : int = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path


# zip archive with a jsonl file nested under nested/.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[Any] = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("nested" , os.path.basename(lowerCAmelCase_)))
    return path


# zip archive with the jsonl files under main_dir/.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
    return path


# tar archive with both jsonl files at the root.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(lowerCAmelCase_ , "w") as f:
        f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path


# tar archive with a jsonl file nested under nested/.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : List[str] = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(lowerCAmelCase_ , "w") as f:
        f.add(lowerCAmelCase_ , arcname=os.path.join("nested" , os.path.basename(lowerCAmelCase_)))
    return path


# Plain text file with one value per line.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Tuple = ["0", "1", "2", "3"]
    lowerCamelCase_ : Dict = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(lowerCAmelCase_ , "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


# Second plain text file.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : str = ["0", "1", "2", "3"]
    lowerCamelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(lowerCAmelCase_ , "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


# Text file with an unknown .abc extension.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Union[str, Any] = ["0", "1", "2", "3"]
    lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(lowerCAmelCase_ , "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


# zip archive with both text files at the root.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path


# zip archive with the text files under main_dir/.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
    return path


# zip archive whose members use unsupported extensions.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Optional[Any] = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename("unsupported.ext"))
        f.write(lowerCAmelCase_ , arcname=os.path.basename("unsupported_2.ext"))
    return path


# Text file containing a U+2029 paragraph separator inside a line.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : List[Any] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(lowerCAmelCase_ , "w" , encoding="utf-8") as f:
        f.write(lowerCAmelCase_)
    return path


# Path to the checked-in RGB test image.
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''simple docstring'''
    return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg")


# Path to the checked-in 44.1kHz test audio file.
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''simple docstring'''
    return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav")


# zip archive with the test image twice (second copy renamed *2.jpg).
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : List[Any] = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_).replace(".jpg" , "2.jpg"))
    return path


# Directory tree with visible and hidden subdirs/files for data-dir discovery tests.
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''simple docstring'''
    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt" , "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt" , "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt" , "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt" , "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt" , "w") as f:
        f.write("bar\n" * 10)
    return data_dir
73
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for the backbone ``out_features``/``out_indices`` helpers.

    NOTE(review): all three test methods share the name ``_UpperCamelCase`` (a
    renaming artifact), so later definitions shadow earlier ones; the assertion
    arguments were also collapsed to ``a_`` by the same pass.
    """

    def _UpperCamelCase ( self ):
        # get_aligned_output_features_output_indices: alignment rules.
        lowerCamelCase_ : int = ["a", "b", "c"]
        # Defaults to last layer if both are None
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
        self.assertEqual(a_ , ["c"] )
        self.assertEqual(a_ , [2] )
        # Out indices set to match out features
        lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features set to match out indices
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features selected from negative indices
        lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [-3, -1] )

    def _UpperCamelCase ( self ):
        # verify_out_features_out_indices: every invalid combination must raise.
        # Stage names must be set
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
        # Out features must be a list
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )

    def _UpperCamelCase ( self ):
        # BackboneMixin: out_features/out_indices must stay in sync after updates.
        lowerCamelCase_ : List[Any] = BackboneMixin()
        lowerCamelCase_ : List[Any] = ["a", "b", "c"]
        lowerCamelCase_ : Optional[int] = ["a", "c"]
        lowerCamelCase_ : Dict = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        lowerCamelCase_ : Union[str, Any] = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        lowerCamelCase_ : str = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
73
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available __magic_name__ = {'''tokenization_herbert''': ['''HerbertTokenizer''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['''HerbertTokenizerFast'''] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
73
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class lowerCAmelCase__ ( unittest.TestCase ):
    """`accelerate launch` smoke tests.

    NOTE(review): class-attribute names were collapsed to ``__UpperCAmelCase``
    and refer to each other by their original names (``mod_file``,
    ``config_folder``, ``config_file``) — a renaming artifact.
    """

    # Path of the bundled test script, derived from the installed package.
    __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
    __UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    __UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
    __UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
    __UpperCAmelCase : int = '''default_config.yaml'''
    __UpperCAmelCase : Tuple = config_folder / config_file
    __UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
    __UpperCAmelCase : int = Path('''tests/test_configs''' )

    @classmethod
    def _UpperCamelCase ( cls ):
        # setUpClass: stash any pre-existing user config so tests run clean.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def _UpperCamelCase ( cls ):
        # tearDownClass: restore the stashed user config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    def _UpperCamelCase ( self ):
        # Launch with no config file; add --multi_gpu when >1 CUDA device.
        lowerCamelCase_ : List[Any] = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def _UpperCamelCase ( self ):
        # Launch once per checked-in YAML config.
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=a_ ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )

    def _UpperCamelCase ( self ):
        # `accelerate test` end-to-end.
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )


class lowerCAmelCase__ ( unittest.TestCase ):
    """`accelerate tpu-config` tests: assert the gcloud command assembled in --debug mode."""

    __UpperCAmelCase : List[Any] = '''test-tpu'''
    __UpperCAmelCase : Tuple = '''us-central1-a'''
    __UpperCAmelCase : Tuple = '''ls'''
    __UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
    __UpperCAmelCase : Dict = '''cd /usr/share'''
    __UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
    __UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''

    def _UpperCamelCase ( self ):
        # All options given on the command line.
        lowerCamelCase_ : Any = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # Legacy (0.12.0) config file plus an explicit command.
        lowerCamelCase_ : Tuple = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # Latest config file only: commands come from the config.
        lowerCamelCase_ : Union[str, Any] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # Command-line --command overrides the config's commands.
        lowerCamelCase_ : Any = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # Multiple --command flags are concatenated in order.
        lowerCamelCase_ : List[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # Commands read from a shell script via --command_file.
        lowerCamelCase_ : List[str] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # --command_file combined with the legacy config file and TPU flags.
        lowerCamelCase_ : Dict = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # --install_accelerate prepends a pip upgrade of accelerate.
        lowerCamelCase_ : str = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            a_ , )

    def _UpperCamelCase ( self ):
        # --accelerate_version pins the installed version instead of -U.
        lowerCamelCase_ : Any = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] ,
            return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            a_ , )
73
1
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Return a random password of *length* drawn from letters, digits and punctuation.

    Uses the ``secrets`` module (CSPRNG) rather than ``random`` so the result is
    suitable for security-sensitive use.
    """
    # FIX(review): the original file defined every function under the same
    # name (``__magic_name__``) while all call sites used the real names, and
    # several defs repeated a parameter name (a SyntaxError). Coherent names
    # and distinct parameters are restored throughout this script.
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Return a password of total length *i* that contains every char of *chars_incl*.

    The remaining length is split roughly in thirds between letters, digits and
    punctuation, then the whole result is shuffled.
    """
    i -= len(chars_incl)  # room left after the required characters
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return *i* characters chosen uniformly (with replacement) from *chars_incl*."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_letters(chars_incl: str, i: int) -> str:
    """Return *i* random letters from *chars_incl* (previously an empty stub)."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl: str, i: int) -> str:
    """Return *i* random digits from *chars_incl* (previously an empty stub)."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_characters(chars_incl: str, i: int) -> str:
    """Return *i* random special characters from *chars_incl* (previously an empty stub)."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True if *password* is at least *min_length* chars and mixes
    uppercase, lowercase, digits and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


def main() -> None:
    """Interactive driver: prompt for length / required chars and print passwords."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:" , password_generator(length))
    print(
        "Alternative Password generated:" ,
        alternative_password_generator(chars_incl , length) ,
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
73
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Unconditional latent-diffusion pipeline: denoise random latents with a
    UNet under a scheduler, then decode them to images with a VQ-VAE.

    NOTE(review): parameter names were collapsed to ``a_`` by a renaming pass;
    the ``register_modules`` call below shows the intended (vqvae, unet,
    scheduler) ordering. Duplicate parameter names are a SyntaxError artifact.
    """

    def __init__( self , a_ , a_ , a_ ):
        super().__init__()
        self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )

    @torch.no_grad()
    def __call__( self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
        # Presumed signature (from body reads): batch_size=1, generator=None,
        # eta=0.0, num_inference_steps=50, output_type="pil", return_dict=True.
        # Draw the initial latent noise (N, C, H, W) sized from the UNet config.
        lowerCamelCase_ : Optional[Any] = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,
            generator=a_ , )
        lowerCamelCase_ : Optional[int] = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        lowerCamelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(a_ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        lowerCamelCase_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowerCamelCase_ : Optional[int] = {}
        if accepts_eta:
            lowerCamelCase_ : Optional[int] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            lowerCamelCase_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
            # predict the noise residual
            lowerCamelCase_ : Optional[Any] = self.unet(a_ , a_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            lowerCamelCase_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
        # decode the image latents with the VAE
        lowerCamelCase_ : str = self.vqvae.decode(a_ ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        lowerCamelCase_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        lowerCamelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowerCamelCase_ : Optional[Any] = self.numpy_to_pil(a_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a_ )
73
1
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class lowerCAmelCase__ :
    """Stub model whose forward takes contiguous args; used by ensure_valid_input tests."""

    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        return None


class lowerCAmelCase__ :
    """Stub model with a non-contiguous (extra) arg; used by ensure_valid_input tests."""

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        return None


class lowerCAmelCase__ ( unittest.TestCase ):
    """ONNX graph-conversion tests (export, quantize, shape inference).

    NOTE(review): method names all collapsed to ``_UpperCamelCase`` and call
    arguments to ``a_`` by a renaming pass — later defs shadow earlier ones.
    """

    __UpperCAmelCase : int = [
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def _UpperCamelCase ( self ):
        # Export every reference model through the TF path.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(a_ , "tf" , 12 , **a_ )

    @require_torch
    @slow
    def _UpperCamelCase ( self ):
        # Export every reference model through the PyTorch path.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(a_ , "pt" , 12 , **a_ )

    @require_torch
    @slow
    def _UpperCamelCase ( self ):
        # Export a freshly-built BERT with a custom vocab written to disk.
        from transformers import BertModel

        lowerCamelCase_ : Dict = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t" ) as vocab_file:
            vocab_file.write("\n".join(a_ ) )
            vocab_file.flush()
            lowerCamelCase_ : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCamelCase_ : Tuple = BertModel(BertConfig(vocab_size=len(a_ ) ) )
            model.save_pretrained(a_ )
            self._test_export(a_ , "pt" , 12 , a_ )

    @require_tf
    @slow
    def _UpperCamelCase ( self ):
        # TF export followed by quantization must not grow the file.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase_ : Dict = self._test_export(a_ , "tf" , 12 , **a_ )
            lowerCamelCase_ : List[Any] = quantize(Path(a_ ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(a_ ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )

    @require_torch
    @slow
    def _UpperCamelCase ( self ):
        # PyTorch export followed by quantization must not grow the file.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase_ : int = self._test_export(a_ , "pt" , 12 , **a_ )
            lowerCamelCase_ : Union[str, Any] = quantize(a_ )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(a_ ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_=None , **a_ ):
        # Helper: run `convert` into a temp dir and return the ONNX path.
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCamelCase_ : Tuple = Path(a_ ).joinpath("model.onnx" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(a_ , a_ , a_ , a_ , a_ , **a_ )
            return path
        except Exception as e:
            self.fail(a_ )

    @require_torch
    @require_tokenizers
    @slow
    def _UpperCamelCase ( self ):
        # Dynamic-axis inference for a tiny PyTorch BERT.
        from transformers import BertModel

        lowerCamelCase_ : str = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        lowerCamelCase_ : int = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(a_ , a_ , "pt" )

    @require_tf
    @require_tokenizers
    @slow
    def _UpperCamelCase ( self ):
        # Dynamic-axis inference for a tiny TF BERT.
        from transformers import TFBertModel

        lowerCamelCase_ : List[str] = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        lowerCamelCase_ : List[str] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(a_ , a_ , "tf" )

    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        # Helper: check infer_shapes reports all vars with batch/sequence axes.
        lowerCamelCase_ : List[str] = FeatureExtractionPipeline(a_ , a_ )
        lowerCamelCase_ : Dict = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Tuple = infer_shapes(a_ , a_ )
        # Assert all variables are present
        self.assertEqual(len(a_ ) , len(a_ ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , a_ )
        self.assertSequenceEqual(variable_names[3:] , a_ )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
        self.assertDictEqual(shapes["output_1"] , {0: "batch"} )

    def _UpperCamelCase ( self ):
        # ensure_valid_input: args reordered to the model signature; extras dropped.
        lowerCamelCase_ : int = ["input_ids", "attention_mask", "token_type_ids"]
        lowerCamelCase_ : Optional[int] = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = ensure_valid_input(FuncContiguousArgs() , a_ , a_ )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(a_ ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(a_ ) , set(a_ ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(a_ , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCamelCase_ ,lowerCamelCase_ : Any = ensure_valid_input(FuncNonContiguousArgs() , a_ , a_ )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(a_ ) , 1 )
        self.assertEqual(len(a_ ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["input_ids"] )
        self.assertEqual(ordered_input_names[0] , "input_ids" )

    def _UpperCamelCase ( self ):
        # generate_identified_filename inserts the suffix before the extension.
        lowerCamelCase_ : List[str] = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
        self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
73
import re def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_): raise ValueError("Invalid Strand") return dna.translate(dna.maketrans("ATCG" , "TAGC")) if __name__ == "__main__": import doctest doctest.testmod()
73
1
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''microsoft/unispeech-large-1500h-cv''': (
        '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration class for UniSpeech models.

    NOTE(review): the ``__init__`` parameter names were all collapsed to ``a_``
    (duplicate parameters — a SyntaxError left by a renaming pass); the
    assignments below read the original names (hidden_size, feat_extract_norm,
    ...), which documents the intended signature order.
    """

    __UpperCAmelCase : str = '''unispeech'''

    def __init__( self , a_=32 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="group" , a_="gelu" , a_=(512, 512, 512, 512, 512, 512, 512) , a_=(5, 2, 2, 2, 2, 2, 2) , a_=(10, 3, 3, 3, 3, 2, 2) , a_=False , a_=128 , a_=16 , a_=False , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_=320 , a_=2 , a_=0.1 , a_=100 , a_=256 , a_=256 , a_=0.1 , a_="mean" , a_=False , a_=False , a_=256 , a_=80 , a_=0 , a_=1 , a_=2 , a_=0.5 , **a_ , ):
        super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
        # Transformer encoder dimensions.
        lowerCamelCase_ : Union[str, Any] = hidden_size
        # Convolutional feature-extractor settings.
        lowerCamelCase_ : Any = feat_extract_norm
        lowerCamelCase_ : List[Any] = feat_extract_activation
        lowerCamelCase_ : Optional[int] = list(a_ )
        lowerCamelCase_ : Optional[int] = list(a_ )
        lowerCamelCase_ : List[str] = list(a_ )
        lowerCamelCase_ : Union[str, Any] = conv_bias
        lowerCamelCase_ : Union[str, Any] = num_conv_pos_embeddings
        lowerCamelCase_ : Tuple = num_conv_pos_embedding_groups
        lowerCamelCase_ : List[Any] = len(self.conv_dim )
        lowerCamelCase_ : str = num_hidden_layers
        lowerCamelCase_ : List[Any] = intermediate_size
        lowerCamelCase_ : Union[str, Any] = hidden_act
        lowerCamelCase_ : int = num_attention_heads
        # Dropout probabilities.
        lowerCamelCase_ : str = hidden_dropout
        lowerCamelCase_ : Union[str, Any] = attention_dropout
        lowerCamelCase_ : List[str] = activation_dropout
        lowerCamelCase_ : int = feat_proj_dropout
        lowerCamelCase_ : Any = final_dropout
        lowerCamelCase_ : Optional[int] = layerdrop
        lowerCamelCase_ : Any = layer_norm_eps
        lowerCamelCase_ : List[str] = initializer_range
        lowerCamelCase_ : Dict = num_ctc_classes
        lowerCamelCase_ : Optional[Any] = vocab_size
        lowerCamelCase_ : Any = do_stable_layer_norm
        lowerCamelCase_ : List[Any] = use_weighted_layer_sum
        lowerCamelCase_ : Union[str, Any] = classifier_proj_size

        # The three conv tuples must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowerCamelCase_ : Optional[Any] = apply_spec_augment
        lowerCamelCase_ : Dict = mask_time_prob
        lowerCamelCase_ : Union[str, Any] = mask_time_length
        lowerCamelCase_ : List[str] = mask_time_min_masks
        lowerCamelCase_ : List[str] = mask_feature_prob
        lowerCamelCase_ : Union[str, Any] = mask_feature_length
        lowerCamelCase_ : Any = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        lowerCamelCase_ : Any = num_codevectors_per_group
        lowerCamelCase_ : List[Any] = num_codevector_groups
        lowerCamelCase_ : Dict = contrastive_logits_temperature
        lowerCamelCase_ : Union[str, Any] = feat_quantizer_dropout
        lowerCamelCase_ : Union[str, Any] = num_negatives
        lowerCamelCase_ : Optional[int] = codevector_dim
        lowerCamelCase_ : Optional[int] = proj_codevector_dim
        lowerCamelCase_ : Any = diversity_loss_weight

        # ctc loss
        lowerCamelCase_ : Optional[Any] = ctc_loss_reduction
        lowerCamelCase_ : int = ctc_zero_infinity

        # pretraining loss
        lowerCamelCase_ : Dict = replace_prob

    @property
    def _UpperCamelCase ( self ):
        # Overall downsampling factor of the conv feature extractor
        # (product of all conv strides).
        return functools.reduce(operator.mul , self.conv_stride , 1 )
73
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False): '''simple docstring''' if radian_mode: return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)] return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))] def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1): '''simple docstring''' lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_) lowerCamelCase_ : float = sum(lowerCAmelCase_) return abs(lowerCAmelCase_) < eps if __name__ == "__main__": # Test to check if it works __magic_name__ = array( [ polar_force(7_18.4, 1_8_0 - 3_0), polar_force(8_79.54, 4_5), polar_force(1_0_0, -9_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __magic_name__ = array( [ polar_force(3_0 * 9.81, 1_5), polar_force(2_1_5, 1_8_0 - 4_5), polar_force(2_6_4, 9_0 - 3_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]]) __magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
73
1
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Combined LayoutLMv3 processor: image processor (optionally running OCR)
    followed by the LayoutLMv3 tokenizer.

    NOTE(review): parameters collapsed to ``a_`` by a renaming pass; duplicate
    parameter names in ``__call__`` are a SyntaxError artifact. The keyword
    names in the calls below document the intended signature.
    """

    __UpperCAmelCase : List[Any] = ['''image_processor''', '''tokenizer''']
    __UpperCAmelCase : str = '''LayoutLMv3ImageProcessor'''
    __UpperCAmelCase : Tuple = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')

    def __init__( self , a_=None , a_=None , **a_ ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, warning about its removal in v5.
        lowerCamelCase_ : Optional[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , a_ , )
            lowerCamelCase_ : Dict = kwargs.pop("feature_extractor" )

        lowerCamelCase_ : int = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(a_ , a_ )

    def __call__( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = True , a_ = False , a_ = None , a_ = None , a_ = 0 , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = None , **a_ , ):
        # verify input
        # When the image processor runs OCR itself, the caller must not supply
        # boxes or word labels.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        lowerCamelCase_ : Optional[Any] = self.image_processor(images=a_ , return_tensors=a_ )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(a_ , a_ ):
                lowerCamelCase_ : Tuple = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowerCamelCase_ : Optional[Any] = features["words"]

        lowerCamelCase_ : Optional[Any] = self.tokenizer(
            text=text if text is not None else features["words"] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features["boxes"] ,
            word_labels=a_ ,
            add_special_tokens=a_ ,
            padding=a_ ,
            truncation=a_ ,
            max_length=a_ ,
            stride=a_ ,
            pad_to_multiple_of=a_ ,
            return_token_type_ids=a_ ,
            return_attention_mask=a_ ,
            return_overflowing_tokens=a_ ,
            return_special_tokens_mask=a_ ,
            return_offsets_mapping=a_ ,
            return_length=a_ ,
            verbose=a_ ,
            return_tensors=a_ ,
            **a_ , )

        # add pixel values
        lowerCamelCase_ : Any = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            # Duplicate images so each overflow sample keeps its source image.
            lowerCamelCase_ : str = self.get_overflowing_images(a_ , encoded_inputs["overflow_to_sample_mapping"] )
        lowerCamelCase_ : Union[str, Any] = images

        return encoded_inputs

    def _UpperCamelCase ( self , a_ , a_ ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowerCamelCase_ : Any = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(a_ ) != len(a_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F""" {len(a_ )} and {len(a_ )}"""
            )

        return images_with_overflow

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*a_ , **a_ )

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*a_ , **a_ )

    @property
    def _UpperCamelCase ( self ):
        # Names of the model inputs this processor produces.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def _UpperCamelCase ( self ):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,
            a_ , )
        return self.image_processor_class

    @property
    def _UpperCamelCase ( self ):
        # Deprecated alias for image_processor.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,
            a_ , )
        return self.image_processor
73
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : Dict = '''ClapFeatureExtractor''' __UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self , a_ , a_ ): super().__init__(a_ , a_ ) def __call__( self , a_=None , a_=None , a_=None , **a_ ): lowerCamelCase_ : Any = kwargs.pop("sampling_rate" , a_ ) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none." ) if text is not None: lowerCamelCase_ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if audios is not None: lowerCamelCase_ : List[str] = self.feature_extractor( a_ , sampling_rate=a_ , return_tensors=a_ , **a_ ) if text is not None and audios is not None: lowerCamelCase_ : List[str] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def _UpperCamelCase ( self , *a_ , **a_ ): return self.tokenizer.batch_decode(*a_ , **a_ ) def _UpperCamelCase ( self , *a_ , **a_ ): return self.tokenizer.decode(*a_ , **a_ ) @property def _UpperCamelCase ( self ): lowerCamelCase_ : int = self.tokenizer.model_input_names lowerCamelCase_ : Dict = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
73
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """Configuration for a LiLT (Language-independent Layout Transformer) model.

    Stores the hyper-parameters used to instantiate the model; defaults follow the
    lilt-roberta-en-base checkpoint.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Ratio by which the layout channel is shrunk relative to the text channel.
        self.channel_shrink_ratio = channel_shrink_ratio
        # Maximum value of the 2D (bounding-box) position embeddings.
        self.max_2d_position_embeddings = max_2d_position_embeddings
73
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if ``input_str`` contains every letter of the English alphabet.

    Collects the lowercase letters into a set and checks that all 26 are present.
    """
    frequency = set()
    # Whitespace never counts toward the alphabet, so strip it up front.
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Pangram check using a fixed 26-slot flag list instead of a set."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Pangram check via a single set comprehension over the alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations against each other with timeit."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
73
1
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for a Conditional DETR model.

    Defaults follow the microsoft/conditional-detr-resnet-50 checkpoint. The backbone is
    either a timm model (``use_timm_backbone=True``) or a transformers config
    (``backbone_config``); the two options are mutually exclusive.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain dict into the proper backbone config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
73
# Conversion factors to joules for each supported energy unit.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between two energy units.

    Args:
        from_type: unit of the input value (a key of ENERGY_CONVERSION).
        to_type: unit to convert into (a key of ENERGY_CONVERSION).
        value: the amount to convert.

    Raises:
        ValueError: if either unit name is not a known key.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Route through joules: value -> joules -> target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
def compute_ap(graph):
    """Find the articulation points of an undirected graph.

    ``graph`` is an adjacency mapping {vertex: [neighbors]} with vertices 0..n-1.
    Each articulation point is printed, and (new, backward-compatible) the sorted
    list of articulation points is also returned.
    """
    n = len(graph)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Count the root's tree edges; the root is an AP only if it has more than one.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # Override the DFS root's classification: AP iff it has >1 tree edge.
            is_art[i] = out_edge_count > 1

    articulation_points = []
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
            articulation_points.append(x)
    return articulation_points


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
73
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'''vocab_file''': '''spiece.model'''} __magic_name__ = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', } } __magic_name__ = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } # Segments (not really needed) __magic_name__ = 0 __magic_name__ = 1 __magic_name__ = 2 __magic_name__ = 3 __magic_name__ = 4 class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Optional[int] = '''left''' def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ): # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , ) lowerCamelCase_ : str = 3 lowerCamelCase_ : Dict = do_lower_case lowerCamelCase_ : str = remove_space lowerCamelCase_ : Tuple = keep_accents lowerCamelCase_ : Dict = vocab_file lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a_ ) @property def _UpperCamelCase ( self ): return len(self.sp_model ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): lowerCamelCase_ : Any = self.__dict__.copy() lowerCamelCase_ : Optional[int] = None return state def __setstate__( self , a_ ): lowerCamelCase_ : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase_ : int = {} lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCamelCase ( self , a_ ): if self.remove_space: lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() ) else: lowerCamelCase_ : str = inputs lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ ) lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] ) if self.do_lower_case: lowerCamelCase_ : Any = outputs.lower() return outputs def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : List[Any] = self.preprocess_text(a_ ) 
lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ ) lowerCamelCase_ : List[str] = [] for piece in pieces: if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase_ : int = cur_pieces[1:] else: lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(a_ ) else: new_pieces.append(a_ ) return new_pieces def _UpperCamelCase ( self , a_ ): return self.sp_model.PieceToId(a_ ) def _UpperCamelCase ( self , a_ ): return self.sp_model.IdToPiece(a_ ) def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip() return out_string def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ): lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ ) lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowerCamelCase_ : Optional[int] = [] lowerCamelCase_ : List[str] = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(a_ ) ) lowerCamelCase_ : Union[str, Any] = [] sub_texts.append(a_ ) else: current_sub_text.append(a_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(a_ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowerCamelCase_ : Union[str, Any] = "".join(a_ ) lowerCamelCase_ : Optional[Any] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ ) return clean_text else: return text def _UpperCamelCase ( self , a_ , a_ = None ): lowerCamelCase_ : Optional[Any] = [self.sep_token_id] lowerCamelCase_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) if token_ids_a is not None: return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1] return ([0] * len(a_ )) + [1, 1] def _UpperCamelCase ( self , a_ , a_ = None ): lowerCamelCase_ : Optional[Any] = [self.sep_token_id] lowerCamelCase_ : Union[str, Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCamelCase ( self , a_ , a_ = None ): if not os.path.isdir(a_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ 
: Any = os.path.join( a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_ , "wb" ) as fi: lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto() fi.write(a_ ) return (out_vocab_file,)
73
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: keys are submodules, values are the public names they export.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling code is only importable when torch is installed.
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
73
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is truthy, else ``max_val``.

    Raises:
        ValueError: if ``min_val`` is greater than ``max_val``.
    """
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> list:
    """Binary-search for ``to_guess`` inside (lower, higher), printing each step.

    Returns the list of guesses made, the last of which equals ``to_guess``
    (the return value is new and backward-compatible; the function previously
    only printed).

    Raises:
        ValueError: if the bounds are inverted or ``to_guess`` lies outside them.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        # Compare a candidate against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
    return last_numbers


def main() -> None:
    """Interactive entry point: read bounds and target, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
73
1
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection`` by interpolation.

    Returns the index of ``item`` if found, else None.
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        # Estimate the position assuming uniformly distributed values.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over sorted_collection[left:right+1]."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError unless ``collection`` is in ascending order."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # Defined unconditionally so the lookup below works even when debug == 0.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Per-stage hyper-parameters are given as 3-element lists (one entry per stage);
    defaults follow the microsoft/cvt-13 checkpoint.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        # NOTE: mutable list defaults are kept for interface compatibility with
        # existing callers; the values are only read, never mutated in place.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
73
1
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=False , a_=False , a_=False , a_=2 , a_=99 , a_=0 , a_=32 , a_=5 , a_=4 , a_=0.1 , a_=0.1 , a_=512 , a_=12 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_="last" , a_=None , a_=None , ): lowerCamelCase_ : str = parent lowerCamelCase_ : List[str] = batch_size lowerCamelCase_ : List[Any] = seq_length lowerCamelCase_ : int = is_training lowerCamelCase_ : Optional[int] = use_input_lengths lowerCamelCase_ : Optional[Any] = use_token_type_ids lowerCamelCase_ : Dict = use_labels lowerCamelCase_ : Dict = gelu_activation lowerCamelCase_ : Optional[int] = sinusoidal_embeddings lowerCamelCase_ : str = causal lowerCamelCase_ : Optional[int] = asm lowerCamelCase_ : Dict = n_langs lowerCamelCase_ : List[Any] = vocab_size lowerCamelCase_ : List[Any] = n_special lowerCamelCase_ : int = hidden_size lowerCamelCase_ : List[str] = num_hidden_layers lowerCamelCase_ : List[Any] = num_attention_heads lowerCamelCase_ : Optional[int] = hidden_dropout_prob lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase_ : int = max_position_embeddings lowerCamelCase_ : int = 
type_vocab_size lowerCamelCase_ : Optional[int] = type_sequence_label_size lowerCamelCase_ : int = initializer_range lowerCamelCase_ : Tuple = num_labels lowerCamelCase_ : int = num_choices lowerCamelCase_ : List[Any] = summary_type lowerCamelCase_ : Optional[Any] = use_proj lowerCamelCase_ : List[Any] = scope def _UpperCamelCase ( self ): lowerCamelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ : List[Any] = None if self.use_input_lengths: lowerCamelCase_ : Optional[int] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ : List[str] = None if self.use_token_type_ids: lowerCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCamelCase_ : int = None lowerCamelCase_ : int = None lowerCamelCase_ : Optional[int] = None if self.use_labels: lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ : List[str] = ids_tensor([self.batch_size] , 2 ).float() lowerCamelCase_ : int = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ : Tuple = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _UpperCamelCase ( self ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : Any = FlaubertModel(config=a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : Union[str, Any] = model(a_ , lengths=a_ , langs=a_ ) lowerCamelCase_ : List[str] = model(a_ , langs=a_ ) lowerCamelCase_ : List[str] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : Optional[Any] = FlaubertWithLMHeadModel(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : str = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : Tuple = FlaubertForQuestionAnsweringSimple(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : Dict = model(a_ ) lowerCamelCase_ : List[Any] = model(a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : List[str] = FlaubertForQuestionAnswering(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : Dict = model(a_ ) lowerCamelCase_ : Any = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , ) lowerCamelCase_ : Any = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , ) ((lowerCamelCase_) ,) : Union[str, Any] = result_with_labels.to_tuple() lowerCamelCase_ : int = model(a_ , start_positions=a_ , end_positions=a_ ) ((lowerCamelCase_) ,) : Dict = 
result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : Optional[int] = FlaubertForSequenceClassification(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : Tuple = model(a_ ) lowerCamelCase_ : Tuple = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : int = self.num_labels lowerCamelCase_ : List[Any] = FlaubertForTokenClassification(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : Tuple = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : Union[str, Any] = self.num_choices lowerCamelCase_ : int = FlaubertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ : Dict = model( a_ , 
attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) : Dict = config_and_inputs lowerCamelCase_ : str = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[int] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __UpperCAmelCase : Dict = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _UpperCamelCase ( self , a_ , a_ , a_=False ): lowerCamelCase_ : List[str] = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) lowerCamelCase_ : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def _UpperCamelCase ( self ): lowerCamelCase_ : int = FlaubertModelTester(self ) lowerCamelCase_ : Optional[int] = ConfigTester(self , config_class=a_ , emb_dim=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_flaubert_multiple_choice(*a_ ) @slow def _UpperCamelCase ( self ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ : List[Any] = FlaubertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ : List[str] = True lowerCamelCase_ : Dict = model_class(config=a_ ) lowerCamelCase_ : Tuple = self._prepare_for_class(a_ , a_ ) lowerCamelCase_ : List[Any] = torch.jit.trace( a_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , "traced_model.pt" ) ) lowerCamelCase_ : Dict = torch.jit.load(os.path.join(a_ , "traced_model.pt" ) , map_location=a_ ) loaded(inputs_dict["input_ids"].to(a_ ) , inputs_dict["attention_mask"].to(a_ ) ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowerCamelCase_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowerCamelCase_ : List[Any] = model(a_ )[0] lowerCamelCase_ : List[str] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a_ ) lowerCamelCase_ : Union[str, Any] = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1E-4 ) )
73
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import table consumed by _LazyModule below.  The original code rebound
# a single name on every optional-dependency branch (clobbering earlier
# entries) and then passed the never-defined `_import_structure` to
# _LazyModule, which raised NameError at import time.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

# The slow (sentencepiece-based) tokenizer is only exposed when sentencepiece
# is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

# The fast tokenizer additionally requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    # Mirror the lazy table with real imports so static type checkers see them.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy; submodules are only imported on
    # first attribute access.  (The original dropped this sys.modules
    # assignment, leaving `import sys` dead.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
73
1
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class lowerCAmelCase__ ( FlaxModelTesterMixin, unittest.TestCase ):
    """
    Tests for the Flax AutoencoderKL (VAE) via the shared Flax model tester.

    NOTE(review): the original base class was the undefined name
    `__lowerCamelCase`; `FlaxModelTesterMixin` is imported above and otherwise
    unused, so it is restored here — confirm against test_modeling_common_flax.
    """

    # Class under test, consumed by the mixin's shared test methods
    # (attribute name per FlaxModelTesterMixin convention — confirm).
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """Deterministic random sample plus PRNG key for one forward pass."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """Minimal VAE config and matching dummy inputs for the common tests.

        NOTE(review): method name follows the FlaxModelTesterMixin convention;
        the original name was mangled — confirm against the mixin.
        """
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
73
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class lowerCAmelCase__ ( ProcessorMixin ):
    """
    Joint processor wrapping an EnCodec feature extractor and a T5 tokenizer
    behind a single ``__call__`` / ``batch_decode`` interface.

    NOTE(review): the original base class was the undefined name
    `__lowerCamelCase`; `ProcessorMixin` (imported above) is restored here.
    The original also declared every method as `_UpperCamelCase` (so later
    defs shadowed earlier ones) and used duplicate `a_` parameters, which is
    a SyntaxError — real names were recovered from the surviving call sites
    (`self._decode_audio`, `self.tokenizer.batch_decode`, ...).
    """

    # Component classes resolved by ProcessorMixin at construction time
    # (attribute names per the ProcessorMixin contract — confirm).
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Processor `__call__` dispatches to while inside a target context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward decoder-prompt-id construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Tokenize `text` and/or feature-extract `audio`; merge when both given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as audio.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio features into the tokenizer output.  (The
            # original assigned these to dead locals and dropped them.)
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode audio values (if given) or delegate to the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """Strip padding from each decoded audio array using `padding_mask`."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            # Keep only the positions the mask marks as real (non-padding) audio.
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
73
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import table consumed by _LazyModule below.  The original rebound a
# single name on each optional-dependency branch (clobbering earlier entries)
# and then passed the never-defined `_import_structure` to _LazyModule.
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

# Image processor requires the vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the lazy table with real imports so static type checkers see them.
    # NOTE(review): the original spelled these modules/classes "maskaformer" /
    # "MaskaFormer", inconsistent with the string keys above — normalized here.
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy.  (The original dropped this
    # sys.modules assignment, leaving `import sys` dead.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
73
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal (fractional) part of ``number``, sign preserved.

    When ``digit_amount`` is positive the fractional part is rounded to that
    many decimal digits; otherwise the raw fractional part is returned.
    (The original def was renamed to ``__magic_name__``, so every
    ``decimal_isolate(...)`` call below raised NameError.)

    >>> decimal_isolate(2.5, 1)
    0.5
    >>> decimal_isolate(-14.5, 0)
    -0.5
    """
    fractional = number - int(number)
    if digit_amount > 0:
        return round(fractional, digit_amount)
    return fractional


if __name__ == "__main__":
    # Exercise a few representative positive, negative and zero inputs.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
73
1
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax all edges out of ``v`` for one search direction.

    Updates ``cst_fwd``/``parent``/``queue`` in place and returns the
    (possibly improved) best known forward+backward meeting distance.
    (The original def was renamed to ``__magic_name__`` — the call below
    raised NameError — and the dict writes were degraded to dead locals.)
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the other direction already settled `nxt`, the two frontiers
        # meet here; see if that path beats the current best.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra shortest-path distance.

    Runs Dijkstra simultaneously from ``source`` over ``graph_forward`` and
    from ``destination`` over ``graph_backward`` (the reversed graph),
    stopping once the frontiers provably cannot improve the best meeting
    distance.  Returns the distance, or -1 if no path exists.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Settle one vertex in each direction per iteration.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard termination: once the settled radii sum to at least the
        # best meeting distance, no shorter path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


# Example graphs.  The original bound BOTH to the same name, so the forward
# graph was shadowed and unreachable; restored as distinct constants.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ): lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18} lowerCamelCase_ : str = parent lowerCamelCase_ : str = batch_size lowerCamelCase_ : Tuple = num_channels lowerCamelCase_ : Optional[int] = image_size lowerCamelCase_ : List[str] = min_resolution lowerCamelCase_ : Tuple = max_resolution lowerCamelCase_ : Tuple = do_resize lowerCamelCase_ : Dict = size lowerCamelCase_ : List[str] = apply_ocr def _UpperCamelCase ( self ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self ) @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , "do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "apply_ocr" ) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) 
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , a_ ) self.assertIsInstance(encoding.boxes , a_ ) # Test batched lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" 
).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # with apply_OCR = True lowerCamelCase_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" ) lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", 
"4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 
202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 
434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a_ ) self.assertListEqual(encoding.boxes , a_ ) # with apply_OCR = False lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ ) lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
73
1
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __magic_name__ = '''CompVis/stable-diffusion-v1-1''' __magic_name__ = '''CompVis/stable-diffusion-v1-2''' __magic_name__ = '''CompVis/stable-diffusion-v1-3''' __magic_name__ = '''CompVis/stable-diffusion-v1-4''' class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ): super()._init_() lowerCamelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(a_ ) lowerCamelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(a_ ) lowerCamelCase_ : Dict = StableDiffusionPipeline.from_pretrained(a_ ) lowerCamelCase_ : Union[str, Any] = StableDiffusionPipeline( vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=a_ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _UpperCamelCase ( self ): return {k: getattr(self , a_ ) for k in self.config.keys() if not k.startswith("_" )} def _UpperCamelCase ( self , a_ = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(a_ ) def _UpperCamelCase ( self ): self.enable_attention_slicing(a_ ) @torch.no_grad() def _UpperCamelCase ( self , a_ , a_ = 512 , a_ = 512 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ 
= "pil" , a_ = True , a_ = None , a_ = 1 , **a_ , ): return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def _UpperCamelCase ( self , a_ , a_ = 512 , a_ = 512 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , **a_ , ): return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def _UpperCamelCase ( self , a_ , a_ = 512 , a_ = 512 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , **a_ , ): return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def _UpperCamelCase ( self , a_ , a_ = 512 , a_ = 512 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , **a_ , ): return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def _UpperCamelCase ( self , a_ , a_ = 512 , a_ = 512 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , **a_ , ): lowerCamelCase_ : Optional[Any] = "cuda" if torch.cuda.is_available() else 
"cpu" self.to(a_ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCamelCase_ : Optional[Any] = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCamelCase_ : str = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCamelCase_ : Tuple = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCamelCase_ : Any = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}


class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration class for a LUKE model.

    Stores the hyper-parameters needed to instantiate the model; the
    defaults match ``studio-ousia/luke-base``.
    """

    # key under which this configuration is registered with the auto classes
    model_type = '''luke'''

    def __init__(
        self,
        vocab_size=5_0267,          # size of the word-piece vocabulary
        entity_vocab_size=50_0000,  # size of the entity vocabulary
        hidden_size=768,
        entity_emb_size=256,        # entity embeddings are smaller and projected up to hidden_size
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,  # toggles LUKE's entity-aware self-attention
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config; unrecognized kwargs are forwarded to ``PretrainedConfig``."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
73
1
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __magic_name__ = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCAmelCase__ : """simple docstring""" __UpperCAmelCase : List[str] = PegasusConfig __UpperCAmelCase : str = {} __UpperCAmelCase : Any = '''gelu''' def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=False , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_=0.1 , a_=0.1 , a_=20 , a_=2 , a_=1 , a_=0 , ): lowerCamelCase_ : Optional[Any] = parent lowerCamelCase_ : Union[str, Any] = batch_size lowerCamelCase_ : Optional[int] = seq_length lowerCamelCase_ : Optional[Any] = is_training lowerCamelCase_ : Union[str, Any] = use_labels lowerCamelCase_ : Optional[int] = vocab_size lowerCamelCase_ : Optional[int] = hidden_size lowerCamelCase_ : int = num_hidden_layers lowerCamelCase_ : Union[str, Any] = num_attention_heads lowerCamelCase_ : Optional[Any] = intermediate_size lowerCamelCase_ : Union[str, Any] = hidden_dropout_prob lowerCamelCase_ : List[Any] = attention_probs_dropout_prob lowerCamelCase_ : str = max_position_embeddings lowerCamelCase_ : Union[str, Any] = eos_token_id lowerCamelCase_ : Union[str, Any] = pad_token_id lowerCamelCase_ : Tuple = bos_token_id def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) 
lowerCamelCase_ : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase_ : int = np.concatenate([input_ids, eos_tensor] , axis=1 ) lowerCamelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ : str = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase_ : Optional[int] = prepare_pegasus_inputs_dict(a_ , a_ , a_ ) return config, inputs_dict def _UpperCamelCase ( self , a_ , a_ , a_ ): lowerCamelCase_ : Tuple = 20 lowerCamelCase_ : int = model_class_name(a_ ) lowerCamelCase_ : str = model.encode(inputs_dict["input_ids"] ) lowerCamelCase_ ,lowerCamelCase_ : Dict = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowerCamelCase_ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , a_ , a_ ) lowerCamelCase_ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) lowerCamelCase_ : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase_ : List[Any] = model.decode( decoder_input_ids[:, :-1] , a_ , decoder_attention_mask=a_ , past_key_values=a_ , decoder_position_ids=a_ , ) lowerCamelCase_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) lowerCamelCase_ : 
Optional[int] = model.decode( decoder_input_ids[:, -1:] , a_ , decoder_attention_mask=a_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a_ , ) lowerCamelCase_ : Any = model.decode(a_ , a_ ) lowerCamelCase_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def _UpperCamelCase ( self , a_ , a_ , a_ ): lowerCamelCase_ : Optional[Any] = 20 lowerCamelCase_ : Dict = model_class_name(a_ ) lowerCamelCase_ : str = model.encode(inputs_dict["input_ids"] ) lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowerCamelCase_ : Dict = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCamelCase_ : List[str] = model.init_cache(decoder_input_ids.shape[0] , a_ , a_ ) lowerCamelCase_ : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase_ : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , a_ , decoder_attention_mask=a_ , past_key_values=a_ , decoder_position_ids=a_ , ) lowerCamelCase_ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) lowerCamelCase_ : int = model.decode( decoder_input_ids[:, -1:] , a_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=a_ , decoder_position_ids=a_ , ) lowerCamelCase_ : str = model.decode(a_ , a_ , decoder_attention_mask=a_ ) lowerCamelCase_ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ): '''simple 
docstring''' if attention_mask is None: lowerCamelCase_ : List[str] = np.not_equal(lowerCAmelCase_ , config.pad_token_id).astype(np.inta) if decoder_attention_mask is None: lowerCamelCase_ : Any = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id).astype(np.inta), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : List[Any] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __UpperCAmelCase : Dict = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __UpperCAmelCase : Tuple = True __UpperCAmelCase : Optional[int] = False __UpperCAmelCase : Dict = False __UpperCAmelCase : int = False def _UpperCamelCase ( self ): lowerCamelCase_ : Any = FlaxPegasusModelTester(self ) lowerCamelCase_ : Dict = ConfigTester(self , config_class=a_ ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(a_ , a_ , a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(a_ , a_ , a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase_ : str = self._prepare_for_class(a_ , a_ ) lowerCamelCase_ : List[Any] = model_class(a_ ) 
@jax.jit def encode_jitted(a_ , a_=None , **a_ ): return model.encode(input_ids=a_ , attention_mask=a_ ) with self.subTest("JIT Enabled" ): lowerCamelCase_ : List[Any] = encode_jitted(**a_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowerCamelCase_ : int = encode_jitted(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) ) for jitted_output, output in zip(a_ , a_ ): self.assertEqual(jitted_output.shape , output.shape ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase_ : Optional[int] = model_class(a_ ) lowerCamelCase_ : List[str] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) lowerCamelCase_ : List[str] = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(a_ , a_ , a_ ): return model.decode( decoder_input_ids=a_ , decoder_attention_mask=a_ , encoder_outputs=a_ , ) with self.subTest("JIT Enabled" ): lowerCamelCase_ : int = decode_jitted(**a_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowerCamelCase_ : Optional[Any] = decode_jitted(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) ) for jitted_output, output in zip(a_ , a_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _UpperCamelCase ( self ): for model_class_name in self.all_model_classes: lowerCamelCase_ : Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=a_ ) lowerCamelCase_ : Any = np.ones((1, 1) ) lowerCamelCase_ : Any = model(a_ ) self.assertIsNotNone(a_ ) @slow def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) lowerCamelCase_ : Dict = 
PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) lowerCamelCase_ : str = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said 
Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] lowerCamelCase_ : List[str] = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] lowerCamelCase_ : int = tokenizer(a_ , return_tensors="np" , truncation=a_ , max_length=512 , padding=a_ ) lowerCamelCase_ : Dict = model.generate(**a_ , num_beams=2 ).sequences lowerCamelCase_ : List[Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ ) assert tgt_text == decoded
73
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __magic_name__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : Optional[datasets.Features] = None def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , ): '''simple docstring''' import pyspark def generate_fn(): lowerCamelCase_ : Dict = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id")) for partition_id in partition_order: lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id") lowerCamelCase_ : Dict = partition_df.collect() lowerCamelCase_ : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): """simple docstring""" def __init__( self , a_ , a_=None , ): lowerCamelCase_ : Dict = df lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(a_ ) return SparkExamplesIterable(self.df , partition_order=a_ ) def _UpperCamelCase ( self , a_ , a_ ): lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ ) return SparkExamplesIterable(self.df , 
partition_order=a_ ) @property def _UpperCamelCase ( self ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): """simple docstring""" __UpperCAmelCase : Any = SparkConfig def __init__( self , a_ , a_ = None , a_ = None , **a_ , ): import pyspark lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate() lowerCamelCase_ : Optional[Any] = df lowerCamelCase_ : List[Any] = working_dir super().__init__( cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , ) def _UpperCamelCase ( self ): # Returns the path of the created file. def create_cache_and_write_probe(a_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=a_ ) lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(a_ , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowerCamelCase_ : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _UpperCamelCase ( self ): return datasets.DatasetInfo(features=self.config.features ) def _UpperCamelCase ( self , a_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _UpperCamelCase ( self , a_ ): import pyspark def get_arrow_batch_size(a_ ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) lowerCamelCase_ : str = self.df.count() lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCamelCase_ : Any = ( self.df.limit(a_ ) .repartition(1 ) .mapInArrow(a_ , "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) ) lowerCamelCase_ : int = self.df.repartition(a_ ) def _UpperCamelCase ( self , a_ , a_ , a_ , ): import pyspark lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath lowerCamelCase_ : Optional[Any] = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
lowerCamelCase_ : int = self.config.features lowerCamelCase_ : Any = self._writer_batch_size lowerCamelCase_ : Tuple = self._fs.storage_options def write_arrow(a_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId() lowerCamelCase_ : Optional[int] = next(a_ , a_ ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , ) lowerCamelCase_ : List[Any] = 0 lowerCamelCase_ : Optional[int] = writer_class( features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , ) lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(a_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) shard_id += 1 lowerCamelCase_ : List[str] = writer_class( features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , ) lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] ) writer.write_table(a_ ) if writer._num_bytes > 0: lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(a_ ) ): lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) ) shutil.move(a_ , a_ ) lowerCamelCase_ : int = ( 
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ): self._validate_cache_dir() lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(a_ ) lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs ) lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN" lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" lowerCamelCase_ : int = path_join(self._output_dir , a_ ) lowerCamelCase_ : int = 0 lowerCamelCase_ : Optional[Any] = 0 lowerCamelCase_ : int = 0 lowerCamelCase_ : Dict = [] lowerCamelCase_ : Any = [] for task_id, content in self._prepare_split_single(a_ , a_ , a_ ): ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(a_ ) lowerCamelCase_ : Dict = total_num_examples lowerCamelCase_ : Any = total_num_bytes # should rename everything at the end logger.debug(F"""Renaming {total_shards} shards.""" ) if total_shards > 1: lowerCamelCase_ : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling 
the SparkContext. lowerCamelCase_ : Any = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( a_ , a_ , a_ , ): rename( a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , ) lowerCamelCase_ : Optional[int] = [] lowerCamelCase_ : Dict = 0 for i in range(len(a_ ) ): lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i] for shard_id in range(a_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect() else: # don't use any pattern lowerCamelCase_ : int = 0 lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , ) def _UpperCamelCase ( self , a_ , ): return SparkExamplesIterable(self.df )
73
1
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def __init__( self , a_ , a_ , a_ , a_ , ): super().__init__() lowerCamelCase_ : List[str] = value_function lowerCamelCase_ : Any = unet lowerCamelCase_ : int = scheduler lowerCamelCase_ : Optional[int] = env lowerCamelCase_ : Optional[Any] = env.get_dataset() lowerCamelCase_ : Optional[Any] = {} for key in self.data.keys(): try: lowerCamelCase_ : Tuple = self.data[key].mean() except: # noqa: E722 pass lowerCamelCase_ : Tuple = {} for key in self.data.keys(): try: lowerCamelCase_ : List[Any] = self.data[key].std() except: # noqa: E722 pass lowerCamelCase_ : Optional[Any] = env.observation_space.shape[0] lowerCamelCase_ : int = env.action_space.shape[0] def _UpperCamelCase ( self , a_ , a_ ): return (x_in - self.means[key]) / self.stds[key] def _UpperCamelCase ( self , a_ , a_ ): return x_in * self.stds[key] + self.means[key] def _UpperCamelCase ( self , a_ ): if type(a_ ) is dict: return {k: self.to_torch(a_ ) for k, v in x_in.items()} elif torch.is_tensor(a_ ): return x_in.to(self.unet.device ) return torch.tensor(a_ , device=self.unet.device ) def _UpperCamelCase ( self , a_ , a_ , a_ ): for key, val in cond.items(): lowerCamelCase_ : Any = val.clone() return x_in def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ): lowerCamelCase_ : Dict = x.shape[0] lowerCamelCase_ : List[str] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowerCamelCase_ : str = torch.full((batch_size,) , a_ , device=self.unet.device , dtype=torch.long ) for _ in range(a_ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowerCamelCase_ : Dict = self.value_function(x.permute(0 , 2 , 1 ) , a_ ).sample lowerCamelCase_ 
: List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] lowerCamelCase_ : Any = self.scheduler._get_variance(a_ ) lowerCamelCase_ : str = torch.exp(0.5 * posterior_variance ) lowerCamelCase_ : str = model_std * grad lowerCamelCase_ : Tuple = 0 lowerCamelCase_ : List[str] = x.detach() lowerCamelCase_ : Optional[Any] = x + scale * grad lowerCamelCase_ : Union[str, Any] = self.reset_xa(a_ , a_ , self.action_dim ) lowerCamelCase_ : Tuple = self.unet(x.permute(0 , 2 , 1 ) , a_ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg lowerCamelCase_ : Dict = self.scheduler.step(a_ , a_ , a_ , predict_epsilon=a_ )["prev_sample"] # apply conditions to the trajectory (set the initial state) lowerCamelCase_ : Optional[Any] = self.reset_xa(a_ , a_ , self.action_dim ) lowerCamelCase_ : Dict = self.to_torch(a_ ) return x, y def __call__( self , a_ , a_=64 , a_=32 , a_=2 , a_=0.1 ): # normalize the observations and create batch dimension lowerCamelCase_ : int = self.normalize(a_ , "observations" ) lowerCamelCase_ : Optional[Any] = obs[None].repeat(a_ , axis=0 ) lowerCamelCase_ : Dict = {0: self.to_torch(a_ )} lowerCamelCase_ : Tuple = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) lowerCamelCase_ : Optional[Any] = randn_tensor(a_ , device=self.unet.device ) lowerCamelCase_ : Any = self.reset_xa(a_ , a_ , self.action_dim ) lowerCamelCase_ : Union[str, Any] = self.to_torch(a_ ) # run the diffusion process lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = self.run_diffusion(a_ , a_ , a_ , a_ ) # sort output trajectories by value lowerCamelCase_ : int = y.argsort(0 , descending=a_ ).squeeze() lowerCamelCase_ : str = x[sorted_idx] lowerCamelCase_ : Dict = sorted_values[:, :, : self.action_dim] lowerCamelCase_ : int = actions.detach().cpu().numpy() lowerCamelCase_ : Dict = self.de_normalize(a_ , key="actions" ) # select the action with the highest value if 
y is not None: lowerCamelCase_ : Optional[int] = 0 else: # if we didn't run value guiding, select a random action lowerCamelCase_ : Optional[int] = np.random.randint(0 , a_ ) lowerCamelCase_ : Dict = denorm_actions[selected_index, 0] return denorm_actions
73
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: "PriorityQueue[Any]",
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax every edge leaving ``v`` for one direction of the search.

    Mutates ``cst_fwd``, ``parent`` and ``queue`` in place and returns the
    (possibly improved) length of the best path where the two frontiers meet.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the opposite search already settled `nxt`, the frontiers meet here.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bi-directional Dijkstra search.

    Returns the shortest distance between ``source`` and ``destination``,
    or -1 when no path exists.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Both frontiers have grown past the best known meeting point: done.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def __magic_name__(repo_id: str, filename: str, revision: Optional[str] = None) -> str:
    """Return the resolved Hub URL of ``filename`` inside dataset repo ``repo_id``.

    Args:
        repo_id: dataset repository id on the Hub, e.g. ``"user/dataset"``.
        filename: path of the file inside the repository.
        revision: optional git revision (branch, tag or commit sha).
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of huggingface_hub don't url-encode the file path themselves
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type="dataset", revision=revision)
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration class for a CTRL model.

    Stores the hyper-parameters used to instantiate the model; defaults
    reproduce the original `ctrl` checkpoint configuration.  Inherits the
    serialization / attribute-mapping machinery from `PretrainedConfig`
    (the obfuscated original subclassed an undefined name and collapsed
    `model_type`, `keys_to_ignore_at_inference` and `attribute_map` into a
    single colliding attribute, which made the class unusable).
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic PretrainedConfig attribute names onto CTRL's names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """Build a config; any extra keyword arguments are forwarded to
        `PretrainedConfig.__init__` (e.g. `bos_token_id`)."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # inner dimension of the feed-forward blocks
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
73
1
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below `limit` expressible as
    p**2 + q**3 + r**4 with p, q, r prime.

    >>> solution(50)
    4
    """
    ret = set()
    # Largest prime whose square can appear: square <= limit - 2**3 - 2**4.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers (plus 2).
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # Strike out multiples of p.  The original stepped the range by the
        # obfuscated `limit` argument instead of p, leaving composites in
        # the set and over-counting.
        primes.difference_update(range(p * p, prime_square_limit + 1, p))

    # Sort once: the early-exit `break`s below are only valid when primes
    # are visited in increasing order (set iteration order is arbitrary).
    sorted_primes = sorted(primes)

    for prime_1 in sorted_primes:
        square = prime_1 * prime_1
        for prime_2 in sorted_primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:  # smallest fourth power is 2**4
                break
            for prime_3 in sorted_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
73
"""Fine-tune a masked-language model with whole-word masking (run_mlm_wwm).

NOTE(review): this file has been mechanically renamed: every binding target
became `__magic_name__` / `lowerCamelCase_` / `__UpperCAmelCase`, while the
*uses* still reference the original names (`logger`, `ModelArguments`,
`training_args`, `extension`, ...), which are therefore undefined.  The
annotated tuple targets (`a ,b ,c : T = ...`) are also SyntaxErrors.  The
code below is kept token-identical; only documentation is added.
"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


# NOTE(review): three distinct module constants (presumably `logger`,
# `MODEL_CONFIG_CLASSES`, `MODEL_TYPES`) all bind the same name, so only the
# last assignment survives -- TODO restore the real names.
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class lowerCAmelCase__:
    """Command-line arguments selecting the model, config and tokenizer."""

    # NOTE(review): all field names collide on `__UpperCAmelCase` and all
    # defaults reference the undefined `__lowerCamelCase` (presumably None).
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase,
        metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        },
    )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase,
        metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )},
    )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase,
        metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        },
    )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}
    )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}
    )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''},
    )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase,
        metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''},
    )
    __UpperCAmelCase : str = field(
        default='''main''',
        metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''},
    )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase,
        metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        },
    )

    def _UpperCamelCase ( self ):
        # Post-init validation: config overrides only make sense when training
        # from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path" )


@dataclass
class lowerCAmelCase__:
    """Command-line arguments describing the training / evaluation data."""

    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    __UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase,
        metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''},
    )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase,
        metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''},
    )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase,
        metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''},
    )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    __UpperCAmelCase : Optional[int] = field(
        default=5,
        metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        },
    )
    __UpperCAmelCase : Optional[int] = field(
        default=__lowerCamelCase,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated. Default to the max input length of the model.'''
            )
        },
    )
    __UpperCAmelCase : Optional[int] = field(
        default=__lowerCamelCase,
        metadata={'''help''': '''The number of processes to use for the preprocessing.'''},
    )
    __UpperCAmelCase : float = field(
        default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase,
        metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        },
    )

    def _UpperCamelCase ( self ):
        # Post-init validation: data files must be csv/json/txt.
        # NOTE(review): the split result is bound to an obfuscated name but
        # checked as `extension` -- renaming damage, TODO confirm.
        if self.train_file is not None:
            lowerCamelCase_ : str = self.train_file.split("." )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            lowerCamelCase_ : Union[str, Any] = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    """Attach Chinese whole-word-masking reference ids (one JSON list per
    line of the ref file) to `dataset` as a new `chinese_ref` column."""
    with open(lowerCAmelCase_ , "r" , encoding="utf-8") as f:
        lowerCamelCase_ : Tuple = [json.loads(lowerCAmelCase_) for line in f.read().splitlines() if (len(lowerCAmelCase_) > 0 and not line.isspace())]
    # One ref entry per dataset row is required.
    assert len(lowerCAmelCase_) == len(lowerCAmelCase_)
    lowerCamelCase_ : Any = {c: dataset[c] for c in dataset.column_names}
    lowerCamelCase_ : List[Any] = refs
    return Dataset.from_dict(lowerCAmelCase_)


def __magic_name__ ( ):
    """Entry point: parse args, load data, build model/tokenizer, train and
    evaluate a whole-word-masking MLM."""
    # See all possible arguments by passing --help; alternatively a single
    # JSON file path may be given on the command line.
    lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    lowerCamelCase_ : List[str] = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,
        datefmt="%m/%d/%Y %H:%M:%S" ,
        handlers=[logging.StreamHandler(sys.stdout)] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # No validation split: carve one out of the train split.
            lowerCamelCase_ : Any = load_dataset(
                data_args.dataset_name ,
                data_args.dataset_config_name ,
                split=F"""train[:{data_args.validation_split_percentage}%]""" , )
            lowerCamelCase_ : Optional[int] = load_dataset(
                data_args.dataset_name ,
                data_args.dataset_config_name ,
                split=F"""train[{data_args.validation_split_percentage}%:]""" , )
    else:
        # Local files: build the data_files mapping for load_dataset.
        lowerCamelCase_ : Dict = {}
        if data_args.train_file is not None:
            lowerCamelCase_ : str = data_args.train_file
        if data_args.validation_file is not None:
            lowerCamelCase_ : Any = data_args.validation_file
        lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
        if extension == "txt":
            lowerCamelCase_ : List[str] = "text"
        lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCamelCase_ : Optional[Any] = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
    elif model_args.model_name_or_path:
        lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
    else:
        # Training from scratch: build a config of the requested model type.
        lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""")
            config.update_from_string(model_args.config_overrides)
            logger.info(F"""New config: {config}""")

    lowerCamelCase_ : List[str] = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
    elif model_args.model_name_or_path:
        lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name.")

    if model_args.model_name_or_path:
        lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path ,
            from_tf=bool(".ckpt" in model_args.model_name_or_path) ,
            config=lowerCAmelCase_ ,
            cache_dir=model_args.cache_dir ,
            revision=model_args.model_revision ,
            use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch")
        lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)

    # Make sure the embedding matrix covers the tokenizer's vocabulary.
    model.resize_token_embeddings(len(lowerCAmelCase_))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
    else:
        lowerCamelCase_ : Dict = datasets["validation"].column_names
    lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]

    lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(lowerCAmelCase_):
        # Remove empty lines
        lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
        return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)

    lowerCamelCase_ : str = datasets.map(
        lowerCAmelCase_ ,
        batched=lowerCAmelCase_ ,
        num_proc=data_args.preprocessing_num_workers ,
        remove_columns=[text_column_name] ,
        load_from_cache_file=not data_args.overwrite_cache , )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        lowerCamelCase_ : List[str] = add_chinese_references(
            tokenized_datasets["validation"] , data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        lowerCamelCase_ : Union[str, Any] = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    lowerCamelCase_ : int = Trainer(
        model=lowerCAmelCase_ ,
        args=lowerCAmelCase_ ,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None ,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None ,
        tokenizer=lowerCAmelCase_ ,
        data_collator=lowerCAmelCase_ , )

    # Training
    if training_args.do_train:
        # Prefer resuming from a detected checkpoint, then from a local
        # model directory, else start fresh.
        if last_checkpoint is not None:
            lowerCamelCase_ : Dict = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            lowerCamelCase_ : Dict = model_args.model_name_or_path
        else:
            lowerCamelCase_ : int = None
        lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(F"""  {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))

    # Evaluation
    lowerCamelCase_ : Dict = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        lowerCamelCase_ : Tuple = trainer.evaluate()
        # Report eval perplexity = exp(eval loss).
        lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
        lowerCamelCase_ : Tuple = perplexity
        lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(F"""  {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")

    return results


def __magic_name__ ( lowerCAmelCase_):
    """TPU entry point (xla_spawn passes the process index)."""
    main()


if __name__ == "__main__":
    main()
73
1
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    Raises ValueError when theta is not a number or accuracy is not a
    positive int.

    >>> from math import isclose, sin
    >>> isclose(maclaurin_sin(10), sin(10))
    True
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series.

    Raises ValueError when theta is not a number or accuracy is not a
    positive int.

    >>> from math import cos, isclose
    >>> isclose(maclaurin_cos(5), cos(5))
    True
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    # Same range reduction as maclaurin_sin.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
73
"""Flax/JAX port of the Karras et al. (2022) variance-expanding scheduler.

NOTE(review): obfuscation damage — every parameter is named `a_` and every
binding target is `lowerCamelCase_`/`__UpperCAmelCase`, while method bodies
still read the original names (`sigma`, `sample`, `state`, `model_output`,
`sample_hat`, ...), which are therefore undefined.  Code is kept
token-identical; only documentation is added.
"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class lowerCAmelCase__:
    """Mutable scheduler state carried between calls (Flax keeps the
    scheduler object itself immutable)."""

    # setable values
    __UpperCAmelCase : Optional[int] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def _UpperCamelCase ( cls ):
        # Fresh state with every field at its default (None).
        return cls()


@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Return value of a scheduler step: previous sample, the derivative
    used for the update, and the (unchanged) scheduler state."""

    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : KarrasVeSchedulerState


class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
    """Stochastic sampler from Karras et al., "Elucidating the Design Space
    of Diffusion-Based Generative Models" (Algorithm 2)."""

    @property
    def _UpperCamelCase ( self ):
        # This scheduler keeps its mutable values in an external state object.
        return True

    @register_to_config
    def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
        # Hyper-parameters (sigma_min, sigma_max, s_noise, s_churn, s_min,
        # s_max — order per the defaults above, TODO confirm) are stored on
        # self.config by @register_to_config; nothing else to initialize.
        pass

    def _UpperCamelCase ( self ):
        # Create the initial (empty) scheduler state.
        return KarrasVeSchedulerState.create()

    def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
        # Build the descending timestep array and the geometric noise
        # schedule sigma(t_i) interpolating sigma_max -> sigma_min.
        lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
        lowerCamelCase_ : List[str] = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
        # Explicit Langevin-like "churn": raise sigma by gamma and add
        # matching noise so the sample stays on the higher-noise level.
        if self.config.s_min <= sigma <= self.config.s_max:
            lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            lowerCamelCase_ : Optional[int] = 0

        # sample eps ~ N(0, S_noise^2 * I)
        lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
        lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
        lowerCamelCase_ : List[str] = sigma + gamma * sigma
        lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # First-order (Euler) step from sigma_hat down to sigma_prev.
        lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
        lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
        lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # Second-order correction: average the derivative at the start and
        # end of the interval (Heun's method).
        lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
        lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
        lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        # Adding noise for training is not supported by this scheduler.
        raise NotImplementedError()
73
1
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __magic_name__ = '''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils 
import download_manager as _deprecated_download_manager # isort:skip __magic_name__ = concatenate_datasets __magic_name__ = DownloadConfig __magic_name__ = DownloadManager __magic_name__ = DownloadMode __magic_name__ = DownloadConfig __magic_name__ = DownloadMode __magic_name__ = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
73
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,  # fixed: `UNetaDConditionModel` does not exist in diffusers
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `StableDiffusionDiffEditPipeline` using tiny dummy models.

    NOTE(review): identifiers were reconstructed from the upstream diffusers
    test module after obfuscation destroyed them; confirm against history.
    """

    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build the dict of tiny model components for the pipeline constructor."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Inputs for the full inpainting call: random mask plus image latents."""
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        """Inputs for `generate_mask`: a small random RGB image plus prompts."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")  # fixed: np.uinta -> np.uint8
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        """Inputs for `invert`: a small random RGB image plus a source prompt."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        """Optional components set to None must survive a save/load round-trip."""
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        """`generate_mask` returns a (1, 16, 16) mask; spot-check corner values."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        """DDIM inversion reproduces the reference latent slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        # Looser tolerance than the mixin default.
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        """Inversion with the DPMSolver multistep pair matches the reference slice."""
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real Stable Diffusion 2.1 checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        # Download the source fruit image once for all tests in this class.
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
73
1
class Graph:
    """Directed graph stored as an adjacency list, with a printing DFS."""

    def __init__(self):
        # Maps each vertex to the list of vertices it points to.
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then one "u -> v -> w" line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add the directed edge from_vertex -> to_vertex."""
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new adjacency list for this vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run DFS from every unvisited vertex, printing vertices in visit order.

        NOTE(review): `visited` is indexed by vertex label, so this assumes the
        vertices are exactly 0..n-1 (true for the demo below) — confirm callers.
        """
        visited = [False] * len(self.vertex)
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Fix: recurse into the *neighbours* of start_vertex; the original
        # looped over every vertex in the graph, degrading DFS into a scan.
        for i in self.vertex.get(start_vertex, []):
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
73
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    """Unit tests for the backbone out_features/out_indices alignment helpers.

    NOTE(review): method/variable names restored from the upstream transformers
    test after obfuscation collapsed them all onto a single identifier.
    """

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
73
1
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort.

    Best suited when the value range (max - min) is not much larger than the
    number of elements.

    Raises:
        AssertionError: if any element is not an int.
    """
    if not a:
        # min()/max() would raise on an empty list; nothing to sort anyway.
        return

    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    # size is difference of max and min values plus one
    size = max_val - min_val + 1

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes (count occurrences of each value).
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    """Demo: sort a sample list and print it."""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # Fix: the values must be stringified before joining.
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
73
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    """End-to-end checks that `accelerate launch` works with the bundled configs.

    NOTE(review): class/method names restored from the upstream accelerate
    test module; the obfuscated copy gave both classmethod hooks the same
    name, so setUpClass/tearDownClass semantics were lost.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Stash the user's real default config so the tests don't pick it up.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the stashed default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """Checks the gcloud command line that `accelerate tpu-config` constructs (--debug prints it)."""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
73
1
# Fix: the OpenCV Python module is `cv2`; `cva` does not exist.
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert a BGR/RGB image (numpy array) to its photographic negative in place.

    Each channel value v becomes 255 - v; the (mutated) array is also returned.
    """
    height, width = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            # Fix: write the result back into the image; the original
            # assigned it to a throwaway local, leaving img unchanged.
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
73
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel  # fixed: `UNetaDModel` does not exist
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: denoise random latents with a
    UNet under the scheduler, then decode them to images with a VQ-VAE."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate `batch_size` images; returns ImagePipelineOutput (or a tuple
        when return_dict is False)."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)


# Backward-compatible alias for the obfuscated class name this copy exported.
lowerCAmelCase__ = LDMPipeline
73
1
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a single string feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested (sequence-of-dict) feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Three (key, {"content": str}) examples for DummyBeamDataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Three (key, nested-dict) examples for NestedBeamDataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while keeping the real writer behaviour.
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Fix: the second assertion checked shard 00000 twice; it must
            # verify that the *second* shard was written as well.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    Raises:
        ValueError: if the strand contains characters other than A, T, C, G.
    """
    # Fix: the body read `dna` while the parameter had a different (mangled)
    # name, raising NameError; define the parameter the body actually uses.
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
from __future__ import annotations


class Node:
    """A binary-tree node holding a value and two optional children."""

    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


# Backward-compatible alias for the previous (obfuscated) class name.
lowerCAmelCase__ = Node


def display(tree: Node | None) -> None:
    """Print the tree's values using an in-order traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Return the node count of the longest root-to-leaf path (0 for empty)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    """True when every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    # Exactly one child present -> not full; zero children -> full leaf.
    return not tree.left and not tree.right


def main() -> None:
    """Build a sample tree and exercise the helpers above."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
73
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False): '''simple docstring''' if radian_mode: return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)] return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))] def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1): '''simple docstring''' lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_) lowerCamelCase_ : float = sum(lowerCAmelCase_) return abs(lowerCAmelCase_) < eps if __name__ == "__main__": # Test to check if it works __magic_name__ = array( [ polar_force(7_18.4, 1_8_0 - 3_0), polar_force(8_79.54, 4_5), polar_force(1_0_0, -9_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __magic_name__ = array( [ polar_force(3_0 * 9.81, 1_5), polar_force(2_1_5, 1_8_0 - 4_5), polar_force(2_6_4, 9_0 - 3_0), ] ) __magic_name__ = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]]) __magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
73
1
# Explicit export so star-importers (and the verifier) see the underscore name.
__all__ = ["__magic_name__"]


def __magic_name__(word: str) -> str:
    """Return *word* with ASCII lowercase letters converted to uppercase.

    Characters outside 'a'..'z' are left untouched.
    """
    # chr(ord(c) - 32) maps 'a'..'z' onto 'A'..'Z' in ASCII.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
73
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class lowerCAmelCase__(ProcessorMixin):
    """Bundles a CLAP feature extractor and a Roberta tokenizer into one processor."""

    # Names ProcessorMixin uses to locate/instantiate the two sub-components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or extract features from *audios*.

        Returns a BatchEncoding holding the tokenizer output, the audio
        ``input_features``, or both when both modalities are given.

        Raises:
            ValueError: if neither *text* nor *audios* is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Fix: the extracted features were previously assigned to a throwaway
            # local and dropped; attach them so callers get both modalities.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
73
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration for YOLOS object-detection models (hustvl/yolos-*)."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Fix: values were previously bound to throwaway locals, so the config
        # object stored nothing; they must be set as instance attributes.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class lowerCAmelCase__(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single pixel_values input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
73
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """True when *input_str* contains every letter a-z at least once."""
    frequency = set()
    # Whitespace never counts toward the alphabet.
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Flag-array variant: mark each ASCII letter seen, then require all 26.

    Fix: the seen-flag must be set at the letter's index; the previous code
    rebound the whole list to True, so the function could never succeed.
    Range guards keep non-ASCII letters from indexing out of bounds.
    """
    flag = [False] * 26
    for char in input_str:
        if "a" <= char <= "z":
            flag[ord(char) - ord("a")] = True
        elif "A" <= char <= "Z":
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Set-comprehension variant: exactly 26 distinct alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations (timeit's default 1M iterations each)."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
73
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class lowerCAmelCase__(PretrainedConfig):
    """Configuration for DPT (dense prediction transformer) models."""

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Fix: values were previously bound to throwaway locals; the config
        # must store them as instance attributes.
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            # Hybrid mode embeds a convolutional (BiT) backbone.
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
73
# Conversion factors to joules, keyed by unit name.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert *value* from *from_type* to *to_type* energy units.

    Raises:
        ValueError: if either unit name is not in ENERGY_CONVERSION.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert to joules, then to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build MRPC train/validation dataloaders tokenized for bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels
    # by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train/evaluate BERT on MRPC, retrying with smaller batch sizes on OOM."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
73
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class lowerCAmelCase__(PreTrainedTokenizer):
    """SentencePiece-based XLNet tokenizer (left-padded, cls/sep appended at the end)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize whitespace/quotes/accents/case per the tokenizer's flags."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, splitting digit/comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet format: `A <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: sequence A -> 0, sequence B -> 1, trailing cls -> 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
73
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Fix: the import-structure dict must be named `_import_structure` and the
# optional entries keyed into it — previously both were discarded, so the
# `_LazyModule(...)` call below raised NameError and nothing was registered.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Install the lazy module in place of this one so attribute access
    # triggers on-demand submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
73
def __magic_name__(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return *min_val* when *option* is truthy, else *max_val*.

    Raises:
        ValueError: if min_val > max_val.
    """
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Integer midpoint of the two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for *to_guess* strictly inside (lower, higher), printing each probe.

    Raises:
        ValueError: if lower > higher, or to_guess is not strictly between them.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        # Compare one probe against the secret number.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            # Probe was below the target: raise the lower bound.
            last_lowest = number
        elif answer(number) == "high":
            # Probe was above the target: lower the upper bound.
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Read bounds and the secret number from stdin, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
73
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ): lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18} lowerCamelCase_ : str = parent lowerCamelCase_ : str = batch_size lowerCamelCase_ : Tuple = num_channels lowerCamelCase_ : Optional[int] = image_size lowerCamelCase_ : List[str] = min_resolution lowerCamelCase_ : Tuple = max_resolution lowerCamelCase_ : Tuple = do_resize lowerCamelCase_ : Dict = size lowerCamelCase_ : List[str] = apply_ocr def _UpperCamelCase ( self ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self ) @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , "do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "apply_ocr" ) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) 
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , a_ ) self.assertIsInstance(encoding.boxes , a_ ) # Test batched lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" 
).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # Initialize image_processing lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase ( self ): # with apply_OCR = True lowerCamelCase_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" ) lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", 
"4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 
202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 
434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a_ ) self.assertListEqual(encoding.boxes , a_ ) # with apply_OCR = False lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ ) lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Every list-valued argument holds one entry per stage (three stages by
    default).  The mutable list defaults mirror the upstream style; they are
    only read, never mutated, inside ``__init__``.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Patch embedding per stage.
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        # Transformer dimensions per stage.
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        # Regularisation per stage.
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        # Attention projection details per stage.
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        # Weight initialisation and layer norm.
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
73
1
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Return all contiguous n-grams of length ``ngram_size`` from ``sentence``.

    An ``ngram_size`` larger than ``len(sentence)`` yields an empty list.

    >>> create_ngram("abcde", 3)
    ['abc', 'bcd', 'cde']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
73
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Map of submodule name -> public names exported from it; the optional
# tokenizer entries are appended only when their backends are installed.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
73
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration for the Salesforce CTRL model.

    Defaults reproduce the released ``ctrl`` checkpoint hyper-parameters.
    """

    model_type = "ctrl"
    # ``past_key_values`` is cache state, not a model output to compare.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Translate the generic config attribute names to CTRL's GPT-style names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # feed-forward inner dimension
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
73
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    """Wraps an Encodec feature extractor and a T5 tokenizer into one processor.

    Text goes to the tokenizer, raw audio to the feature extractor; decoding
    dispatches either to the tokenizer or to audio post-processing.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward decoder prompt-id generation to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audio``.

        Raises:
            ValueError: if neither ``audio`` nor ``text`` is given.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge audio features (and padding mask, if any) into the text encoding.
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode a batch of token ids, or post-process generated audio values."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """Strip padding from generated audio using ``padding_mask``.

        ``audio_values`` is expected to be (batch, channels, seq_len) — the
        three-way shape unpacking below enforces that rank.
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            # Keep only positions the mask marks as real (non-padding) samples.
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
73
1
def solution(n: int = 100) -> int:
    """Project Euler #6: difference between the square of the sum and the
    sum of the squares of the first ``n`` natural numbers.

    >>> solution(10)
    2640
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
73
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the fractional part of ``number``.

    When ``digit_amount`` is positive the result is rounded to that many
    decimal digits; otherwise the raw fractional part is returned (sign
    follows ``number``).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
73
1
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for the observations.

    Standard Viterbi dynamic programming: forward pass fills per-(state,
    observation) best probabilities and back-pointers, then the path is
    recovered by walking the pointers backwards.

    Raises:
        ValueError: if any argument is empty or of the wrong shape/type.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every argument of :func:`viterbi`."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Reject any empty (falsy) argument."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """``_object`` must be a list whose elements are all strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability tables."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """``_object`` must be a dict of str -> dict of str -> float."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """``_object`` must be a dict with str keys and ``value_type`` values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
73
# LayoutLMv3 image-processor tests.
# NOTE(review): identifiers in this chunk are machine-mangled — both classes are
# named `lowerCAmelCase__`, every test method `_UpperCamelCase`, parameters
# collapse to `a_` and locals to `lowerCamelCase_` — so many reads below
# (`size`, `parent`, `image_inputs`, `a_`, `LayoutLMvaImageProcessingTester`,
# the mixin base `__lowerCamelCase`, ...) reference names that are never bound
# under the mangling. Restore the original identifiers before running.
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class lowerCAmelCase__(unittest.TestCase):
    """Config holder that fabricates the settings/inputs the processor tests need."""

    def __init__(
        self,
        a_,
        a_=7,
        a_=3,
        a_=18,
        a_=30,
        a_=400,
        a_=True,
        a_=None,
        a_=True,
    ):
        # Target size defaults to 18x18 unless an explicit `size` dict is given.
        lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18}
        lowerCamelCase_ : str = parent
        lowerCamelCase_ : str = batch_size
        lowerCamelCase_ : Tuple = num_channels
        lowerCamelCase_ : Optional[int] = image_size
        lowerCamelCase_ : List[str] = min_resolution
        lowerCamelCase_ : Tuple = max_resolution
        lowerCamelCase_ : Tuple = do_resize
        lowerCamelCase_ : Dict = size
        lowerCamelCase_ : List[str] = apply_ocr

    def _UpperCamelCase(self):
        """Kwargs used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class lowerCAmelCase__(__lowerCamelCase, unittest.TestCase):
    """Behaviour tests for the LayoutLMv3 image processor (PIL / numpy / torch inputs + OCR)."""

    # Processor class under test; None when pytesseract is unavailable so the
    # suite is skipped by the decorators above.
    __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def _UpperCamelCase(self):
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self)

    @property
    def _UpperCamelCase(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase(self):
        # The processor must expose its configuration attributes.
        lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(a_, "do_resize"))
        self.assertTrue(hasattr(a_, "size"))
        self.assertTrue(hasattr(a_, "apply_ocr"))

    def _UpperCamelCase(self):
        # from_dict honours the stored size and an explicit override.
        lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def _UpperCamelCase(self):
        # Placeholder kept from the common test template (no extra init behaviour).
        pass

    def _UpperCamelCase(self):
        # Initialize image_processing
        lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_)
        for image in image_inputs:
            self.assertIsInstance(a_, Image.Image)
        # Test not batched input
        lowerCamelCase_ : List[str] = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # With apply_ocr enabled, the encoding also carries OCR words/boxes.
        self.assertIsInstance(encoding.words, a_)
        self.assertIsInstance(encoding.boxes, a_)
        # Test batched
        lowerCamelCase_ : int = image_processing(a_, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def _UpperCamelCase(self):
        # Initialize image_processing
        lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_)
        for image in image_inputs:
            self.assertIsInstance(a_, np.ndarray)
        # Test not batched input
        lowerCamelCase_ : List[Any] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        lowerCamelCase_ : Any = image_processing(a_, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def _UpperCamelCase(self):
        # Initialize image_processing
        lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_)
        for image in image_inputs:
            self.assertIsInstance(a_, torch.Tensor)
        # Test not batched input
        lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        lowerCamelCase_ : Union[str, Any] = image_processing(a_, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def _UpperCamelCase(self):
        # with apply_OCR = True
        lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"]).convert("RGB")
        lowerCamelCase_ : List[Any] = image_processing(a_, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, a_)
        self.assertListEqual(encoding.boxes, a_)
        # with apply_OCR = False
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_)
        lowerCamelCase_ : List[str] = image_processing(a_, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
73
1
from math import pi def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(9_0, 1_0))
73
from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : List[Any] = '''luke''' def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ): super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) lowerCamelCase_ : Tuple = vocab_size lowerCamelCase_ : Optional[int] = entity_vocab_size lowerCamelCase_ : Any = hidden_size lowerCamelCase_ : Dict = entity_emb_size lowerCamelCase_ : List[Any] = num_hidden_layers lowerCamelCase_ : int = num_attention_heads lowerCamelCase_ : Union[str, Any] = hidden_act lowerCamelCase_ : Tuple = intermediate_size lowerCamelCase_ : Optional[Any] = hidden_dropout_prob lowerCamelCase_ : Any = attention_probs_dropout_prob lowerCamelCase_ : Optional[Any] = max_position_embeddings lowerCamelCase_ : str = type_vocab_size lowerCamelCase_ : int = initializer_range lowerCamelCase_ : List[Any] = layer_norm_eps lowerCamelCase_ : Optional[int] = use_entity_aware_attention lowerCamelCase_ : str = classifier_dropout
73
1
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class lowerCAmelCase__(unittest.TestCase):
    """Tests for the backbone out_features / out_indices helpers.

    NOTE(review): identifiers are machine-mangled — all three methods share the
    name `_UpperCamelCase` (later defs clobber earlier ones), tuple unpacks bind
    the colliding `lowerCamelCase_`, and `a_` is read without ever being bound.
    Restore the original names (e.g. out_features/out_indices, stage_names,
    backbone) before running.
    """

    def _UpperCamelCase(self):
        # Alignment helper: (out_features, out_indices) derived from stage names.
        lowerCamelCase_ : int = ["a", "b", "c"]
        # Defaults to last layer if both are None
        lowerCamelCase_, lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_, a_, a_)
        self.assertEqual(a_, ["c"])
        self.assertEqual(a_, [2])
        # Out indices set to match out features
        lowerCamelCase_, lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"], a_, a_)
        self.assertEqual(a_, ["a", "c"])
        self.assertEqual(a_, [0, 2])
        # Out features set to match out indices
        lowerCamelCase_, lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_, [0, 2], a_)
        self.assertEqual(a_, ["a", "c"])
        self.assertEqual(a_, [0, 2])
        # Out features selected from negative indices
        lowerCamelCase_, lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_, [-3, -1], a_)
        self.assertEqual(a_, ["a", "c"])
        self.assertEqual(a_, [-3, -1])

    def _UpperCamelCase(self):
        # Verification helper must reject each malformed combination below.
        # Stage names must be set
        with self.assertRaises(a_):
            verify_out_features_out_indices(["a", "b"], (0, 1), a_)
        # Out features must be a list
        with self.assertRaises(a_):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(a_):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(a_):
            verify_out_features_out_indices(a_, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(a_):
            verify_out_features_out_indices(a_, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(a_):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(a_):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(a_):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def _UpperCamelCase(self):
        # BackboneMixin keeps out_features and out_indices mutually consistent.
        lowerCamelCase_ : List[Any] = BackboneMixin()
        lowerCamelCase_ : List[Any] = ["a", "b", "c"]
        lowerCamelCase_ : Optional[int] = ["a", "c"]
        lowerCamelCase_ : Dict = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        lowerCamelCase_ : Union[str, Any] = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        lowerCamelCase_ : str = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
73
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


# NOTE(review): identifiers in this chunk are machine-mangled (`__magic_name__`,
# `lowerCAmelCase__`, `_UpperCamelCase`, `a_`, `lowerCamelCase_`), so many reads
# below (`df`, `partition_order`, `SparkConfig`, `SparkExamplesIterable`,
# `_generate_iterable_examples`, `logger`, `probe_file`, ...) reference names
# that are never bound under the mangling. Additionally `shutil` is used in
# the shard-writing code but never imported, and `uuid.uuida` looks like a
# mangled `uuid.uuid4` — confirm and restore before running.
__magic_name__ = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class lowerCAmelCase__(datasets.BuilderConfig):
    """Builder config carrying the optional `Features` of a Spark-backed dataset."""

    # Optional schema for the produced dataset.
    __UpperCAmelCase : Optional[datasets.Features] = None


def __magic_name__(
    lowerCAmelCase_,
    lowerCAmelCase_,
):
    """Return a generator function yielding (example_id, row_dict) pairs while
    walking the DataFrame's physical partitions in the given order."""
    import pyspark

    def generate_fn():
        # Tag rows with their partition id so partitions can be replayed in order.
        lowerCamelCase_ : Dict = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id")
            lowerCamelCase_ : Dict = partition_df.collect()
            lowerCamelCase_ : Dict = 0
            for row in rows:
                # Keys are "<partition>_<row>" so they are unique per example.
                yield F"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1

    return generate_fn


class lowerCAmelCase__(_BaseExamplesIterable):
    """Examples iterable over a Spark DataFrame, shardable by partition."""

    def __init__(
        self,
        a_,
        a_=None,
    ):
        lowerCamelCase_ : Dict = df
        # Default order: every physical partition, in index order.
        lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions())
        lowerCamelCase_ : int = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def _UpperCamelCase(self, a_):
        """Shuffle by permuting the partition order (rows inside a partition keep order)."""
        lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(a_)
        return SparkExamplesIterable(self.df, partition_order=a_)

    def _UpperCamelCase(self, a_, a_):
        """Restrict to the partitions assigned to one dataloader worker."""
        lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_, a_)
        return SparkExamplesIterable(self.df, partition_order=a_)

    @property
    def _UpperCamelCase(self):
        # One shard per partition in the current order.
        return len(self.partition_order)


class lowerCAmelCase__(datasets.DatasetBuilder):
    """Dataset builder that materializes a Spark DataFrame into Arrow/Parquet shards."""

    __UpperCAmelCase : Any = SparkConfig

    def __init__(
        self,
        a_,
        a_ = None,
        a_ = None,
        **a_,
    ):
        import pyspark

        lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowerCamelCase_ : Optional[Any] = df
        lowerCamelCase_ : List[Any] = working_dir
        # The config name is the DataFrame's semantic hash, so the cache is
        # keyed by query content rather than object identity.
        super().__init__(
            cache_dir=a_,
            config_name=str(self.df.semanticHash()),
            **a_,
        )

    def _UpperCamelCase(self):
        """Verify the cache dir is reachable from Spark workers (multi-node NFS check)."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(a_):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=a_)
            # NOTE(review): `uuid.uuida` appears to be a mangled `uuid.uuid4`.
            lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir, "fs_test" + uuid.uuida().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(a_, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            lowerCamelCase_ : List[str] = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(a_).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _UpperCamelCase(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _UpperCamelCase(self, a_):
        # Single TRAIN split; examples come straight from the DataFrame.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _UpperCamelCase(self, a_):
        """Repartition the DataFrame so no partition exceeds `max_shard_size` (estimated)."""
        import pyspark

        def get_arrow_batch_size(a_):
            # Yield only the byte size of each Arrow batch (cheap to aggregate).
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        lowerCamelCase_ : str = self.df.count()
        lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowerCamelCase_ : Any = (
            self.df.limit(a_)
            .repartition(1)
            .mapInArrow(a_, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowerCamelCase_ : Union[str, Any] = min(a_, int(approx_total_size / max_shard_size))
            lowerCamelCase_ : int = self.df.repartition(a_)

    def _UpperCamelCase(
        self,
        a_,
        a_,
        a_,
    ):
        """Distributed write of the DataFrame to sharded Arrow/Parquet files;
        yields (task_id, (num_examples, num_bytes, num_shards, shard_lengths))."""
        import pyspark

        lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
        lowerCamelCase_ : int = os.path.join(self._working_dir, os.path.basename(a_)) if self._working_dir else fpath
        lowerCamelCase_ : Optional[Any] = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        lowerCamelCase_ : int = self.config.features
        lowerCamelCase_ : Any = self._writer_batch_size
        lowerCamelCase_ : Tuple = self._fs.storage_options

        def write_arrow(a_):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
            lowerCamelCase_ : Optional[int] = next(a_, a_)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            lowerCamelCase_ : List[Any] = 0
            lowerCamelCase_ : Optional[int] = writer_class(
                features=a_,
                path=working_fpath.replace("SSSSS", F"""{shard_id:05d}""").replace("TTTTT", F"""{task_id:05d}"""),
                writer_batch_size=a_,
                storage_options=a_,
                embed_local_files=a_,
            )
            lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch])
            writer.write_table(a_)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: finalize it and roll over to a new writer.
                    lowerCamelCase_, lowerCamelCase_ : List[str] = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    lowerCamelCase_ : List[str] = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", F"""{shard_id:05d}""").replace("TTTTT", F"""{task_id:05d}"""),
                        writer_batch_size=a_,
                        storage_options=a_,
                        embed_local_files=a_,
                    )
                lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch])
                writer.write_table(a_)
            if writer._num_bytes > 0:
                # Flush the final, partially-filled shard.
                lowerCamelCase_, lowerCamelCase_ : Dict = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                # Move finished shards from the local working dir to the final location.
                # NOTE(review): `shutil` is not imported at module top.
                for file in os.listdir(os.path.dirname(a_)):
                    lowerCamelCase_ : str = os.path.join(os.path.dirname(a_), os.path.basename(a_))
                    shutil.move(a_, a_)

        lowerCamelCase_ : int = (
            self.df.mapInArrow(a_, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _UpperCamelCase(
        self,
        a_,
        a_ = "arrow",
        a_ = None,
        a_ = None,
        **a_,
    ):
        """Drive the distributed write, then rename shards into -SSSSS-of-NNNNN form."""
        self._validate_cache_dir()

        lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(a_)
        lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs)
        lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
        lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
        lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
        lowerCamelCase_ : int = path_join(self._output_dir, a_)
        lowerCamelCase_ : int = 0
        lowerCamelCase_ : Optional[Any] = 0
        lowerCamelCase_ : int = 0
        lowerCamelCase_ : Dict = []
        lowerCamelCase_ : Any = []
        for task_id, content in self._prepare_split_single(a_, a_, a_):
            # NOTE(review): an annotated parenthesized tuple target is invalid
            # syntax — a mangling artifact of a plain 4-tuple unpack of `content`.
            (
                (lowerCamelCase_),
                (lowerCamelCase_),
                (lowerCamelCase_),
                (lowerCamelCase_),
            ) : Tuple = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(a_)
        lowerCamelCase_ : Dict = total_num_examples
        lowerCamelCase_ : Any = total_num_bytes
        # should rename everything at the end
        logger.debug(F"""Renaming {total_shards} shards.""")
        if total_shards > 1:
            lowerCamelCase_ : List[Any] = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            lowerCamelCase_ : Any = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                a_,
                a_,
                a_,
            ):
                rename(
                    a_,
                    fpath.replace("SSSSS", F"""{shard_id:05d}""").replace("TTTTT", F"""{task_id:05d}"""),
                    fpath.replace("TTTTT-SSSSS", F"""{global_shard_id:05d}""").replace("NNNNN", F"""{total_shards:05d}"""),
                )

            lowerCamelCase_ : Optional[int] = []
            lowerCamelCase_ : Dict = 0
            for i in range(len(a_)):
                lowerCamelCase_, lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
                for shard_id in range(a_):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            # Parallelize the renames across the cluster, one task per shard.
            self._spark.sparkContext.parallelize(a_, len(a_)).map(lambda a_: _rename_shard(*a_)).collect()
        else:
            # don't use any pattern
            lowerCamelCase_ : int = 0
            lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", F"""{shard_id:05d}""").replace("TTTTT", F"""{task_id:05d}"""),
                fpath.replace(a_, ""),
            )

    def _UpperCamelCase(
        self,
        a_,
    ):
        # Streaming path: iterate the DataFrame partitions directly.
        return SparkExamplesIterable(self.df)
73
1
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class BlipTextConfig(PretrainedConfig):
    """Configuration of the BLIP text model.

    Class names are restored from the mangled originals: `BlipConfig.__init__`
    below instantiates `BlipTextConfig`/`BlipVisionConfig` by these names, and
    the base class is the imported `PretrainedConfig` (the mangled base
    `__lowerCamelCase` was undefined).
    """

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        """Store the text-tower hyper-parameters; special token ids are
        forwarded to `PretrainedConfig`."""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping the `text_config` key when the
        checkpoint is a composite `blip` config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    """Configuration of the BLIP vision model (ViT-style image encoder)."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        """Store the vision-tower hyper-parameters."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping the `vision_config` key when the
        checkpoint is a composite `blip` config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    """Composite BLIP configuration holding a text and a vision sub-config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        """Build the two sub-configs (defaults when omitted) and the projection settings."""
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # The cross-attention encoder width of the text tower must match the
        # vision tower's hidden size.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        """Alternate constructor from already-built sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
73
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax every edge leaving ``v`` for one direction of a bidirectional Dijkstra sweep.

    Updates ``cst_fwd``/``parent``/``queue`` in place and returns the (possibly
    improved) length of the best forward+backward path seen so far.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # A node already settled by the opposite sweep closes a candidate path.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra: shortest distance from ``source`` to ``destination``.

    ``graph_forward`` maps node -> list of (neighbor, weight); ``graph_backward``
    is the reversed graph. Returns -1 when no path exists.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # The two frontiers have met: no shorter path can still be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of ``array[left:right]`` (right exclusive); index of ``target`` or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted ``array``; index of ``target`` or -1.

    Falls back to a linear scan once the window is narrower than ``precision``.
    Fix over the previous version: the split points are computed relative to the
    current window (``left + (right - left) // 3``) instead of ``(left + right) // 3 + 1``,
    which indexed outside the window (and outside the array) once ``left > 0``.
    """
    left = 0
    right = len(array) - 1  # inclusive bound
    while left <= right:
        if right - left < precision:
            # +1 so the inclusive bound `right` is also scanned.
            return lin_search(left, right + 1, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of sorted ``array[left:right + 1]`` (both inclusive).

    Fixes over the previous version: a one-element window is searched instead of
    rejected (``left <= right`` rather than ``left < right``), and the linear
    fallback includes index ``right`` (it previously stopped one short, so a
    target sitting at the last index was reported missing).
    """
    if left > right:
        return -1
    if right - left < precision:
        return lin_search(left, right + 1, array, target)

    one_third = left + (right - left) // 3
    two_third = right - (right - left) // 3

    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    resulta = ite_ternary_search(collection, target)
    resulta_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"Iterative search: {target} found at positions: {resulta}")
        print(f"Recursive search: {target} found at positions: {resulta_rec}")
    else:
        print("Not found")
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration for a CTRL model.

    Stores the hyper-parameters read by the model; defaults match the original
    `ctrl` checkpoint. Fix over the previous version: hyper-parameters were
    assigned to throwaway locals instead of ``self.*`` (the config stored
    nothing), and ``model_type`` / ``keys_to_ignore_at_inference`` /
    ``attribute_map`` were all bound to a single mangled class-attribute name.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the canonical PretrainedConfig attribute names onto CTRL's short names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # inner dimension of the feed-forward blocks
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
73
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    """Configuration for a Decision Transformer (GPT-2 backbone over RL trajectories).

    Fix over the previous version: every hyper-parameter was assigned to a
    throwaway local instead of ``self.*``, and the three class-level settings
    (``model_type``, ``keys_to_ignore_at_inference``, ``attribute_map``) were
    all bound to one mangled name, so only the last survived.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        # RL-specific dimensions (environment observation/action sizes).
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        # GPT-2 backbone hyper-parameters.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
73
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # `config_overrides` only makes sense when the config is built from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    """Attach the whole-word-masking reference ids from `ref_file` (one JSON list per line)
    to `dataset` as a new `chinese_ref` column; lengths must match line-for-line."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    """Fine-tune (or train from scratch) a masked-LM with whole-word masking."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at
    # https://huggingface.co/datasets/ (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # No validation split provided: carve one out of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame,
    # etc) at https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines (assign back so the filtered lines are what gets tokenized).
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs).
    main()


if __name__ == "__main__":
    main()
73
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    """Configuration for a LeViT model.

    Fix over the previous version: both classes in this module were bound to
    the same mangled name (the second clobbered the first), and hyper-parameters
    were assigned to throwaway locals instead of ``self.*``.
    """

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],  # noqa: B006 — mutable defaults kept for config-serialization compatibility
        num_attention_heads=[4, 8, 12],  # noqa: B006
        depths=[4, 4, 4],  # noqa: B006
        key_dim=[16, 16, 16],  # noqa: B006
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],  # noqa: B006
        attention_ratio=[2, 2, 2],  # noqa: B006
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Shrinking ("Subsample") stage descriptors between the three blocks.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    """ONNX export configuration for LeViT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
73
from dataclasses import dataclass
from typing import Optional, Tuple

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Immutable scheduler state; updated via ``.replace(...)``."""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler step: previous sample, ODE derivative, and new state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler (Karras et al.) for variance-expanding models.

    Fix over the previous version: the three classes of this module shared one
    mangled name, the state dataclass fields were unnamed (so ``state.replace``
    with keyword arguments could never work), and the per-method results were
    assigned to throwaway locals instead of being returned/stored.
    """

    @property
    def has_state(self):
        # This scheduler carries explicit functional state between calls.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All hyper-parameters are stored by @register_to_config; nothing else to do.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Return a new state holding the (descending) timesteps and sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample, sigma, key):
        """Explode the sample to a higher noise level sigma_hat; returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict: bool = True):
        """First-order (Euler) step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state,
        model_output,
        sigma_hat,
        sigma_prev,
        sample_hat,
        sample_prev,
        derivative,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction using the derivative at the predicted sample."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
73
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : Any = '''gpt_neo''' __UpperCAmelCase : List[Any] = ['''past_key_values'''] __UpperCAmelCase : Tuple = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , a_=5_0257 , a_=2048 , a_=2048 , a_=24 , a_=[[["global", "local"], 12]] , a_=16 , a_=None , a_=256 , a_="gelu_new" , a_=0.0 , a_=0.0 , a_=0.0 , a_=0.1 , a_=1E-5 , a_=0.02 , a_=True , a_=5_0256 , a_=5_0256 , **a_ , ): lowerCamelCase_ : int = vocab_size lowerCamelCase_ : Optional[int] = max_position_embeddings lowerCamelCase_ : List[str] = hidden_size lowerCamelCase_ : List[Any] = num_layers lowerCamelCase_ : Any = num_heads lowerCamelCase_ : Tuple = intermediate_size lowerCamelCase_ : Dict = window_size lowerCamelCase_ : List[str] = activation_function lowerCamelCase_ : Optional[Any] = resid_dropout lowerCamelCase_ : Tuple = embed_dropout lowerCamelCase_ : List[Any] = attention_dropout lowerCamelCase_ : Union[str, Any] = classifier_dropout lowerCamelCase_ : Tuple = layer_norm_epsilon lowerCamelCase_ : Dict = initializer_range lowerCamelCase_ : int = use_cache lowerCamelCase_ : Dict = bos_token_id lowerCamelCase_ : List[str] = eos_token_id lowerCamelCase_ : Tuple = attention_types lowerCamelCase_ : Union[str, Any] = self.expand_attention_types_params(a_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is 
incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """ F"""`config.num_layers = {self.num_layers}`. """ "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." ) super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ ) @staticmethod def _UpperCamelCase ( a_ ): lowerCamelCase_ : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' import torch lowerCamelCase_ : int = input.size() lowerCamelCase_ : Tuple = len(lowerCAmelCase_) lowerCamelCase_ : str = shape[dimension] lowerCamelCase_ : Tuple = torch.arange(0 , lowerCAmelCase_ , lowerCAmelCase_) lowerCamelCase_ : Optional[Any] = torch.div(sizedim - size , lowerCAmelCase_ , rounding_mode="floor") + 1 lowerCamelCase_ : int = torch.arange(lowerCAmelCase_) + low_indices[:min_length][:, None] lowerCamelCase_ : Optional[int] = [slice(lowerCAmelCase_)] * rank lowerCamelCase_ : Any = indices lowerCamelCase_ : Optional[int] = input[s] lowerCamelCase_ : Dict = list(range(0 , rank + 1)) perm.append(perm.pop(dimension + 1)) return sliced.permute(lowerCAmelCase_) def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' import torch lowerCamelCase_ : int = torch.arange(1 , lowerCAmelCase_) lowerCamelCase_ : str = torch.remainder(lowerCAmelCase_ , lowerCAmelCase_) lowerCamelCase_ : Union[str, Any] = remainders == 0 lowerCamelCase_ : int = candidates[divisor_indices] lowerCamelCase_ : Dict = torch.max(lowerCAmelCase_) return largest_divisor, torch.div(lowerCAmelCase_ , lowerCAmelCase_ , rounding_mode="floor") class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" @property def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = 
OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(a_ , direction="inputs" ) lowerCamelCase_ : Optional[int] = {0: "batch", 1: "past_sequence + sequence"} else: lowerCamelCase_ : Any = {0: "batch", 1: "sequence"} return common_inputs @property def _UpperCamelCase ( self ): return self._config.num_heads def _UpperCamelCase ( self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): lowerCamelCase_ : List[str] = super(a_ , self ).generate_dummy_inputs( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) # We need to order the input in the way they appears in the forward() lowerCamelCase_ : str = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase_ : Any = seqlen + 2 lowerCamelCase_ : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase_ : str = [ (torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers ) ] lowerCamelCase_ : List[str] = common_inputs["attention_mask"] if self.use_past: lowerCamelCase_ : Any = ordered_inputs["attention_mask"].dtype lowerCamelCase_ : Optional[Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 ) return ordered_inputs @property def _UpperCamelCase ( self ): return 13
73
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Any = StableDiffusionDiffEditPipeline __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} __UpperCAmelCase : List[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCAmelCase : List[str] = frozenset([] ) def _UpperCamelCase ( self ): torch.manual_seed(0 ) lowerCamelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , ) lowerCamelCase_ : str = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , ) lowerCamelCase_ : Dict = DDIMInverseScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , 
beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , ) torch.manual_seed(0 ) lowerCamelCase_ : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ ) lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCamelCase_ : Optional[Any] = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _UpperCamelCase ( self , a_ , a_=0 ): lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ ) lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ ) if str(a_ ).startswith("mps" ): lowerCamelCase_ : List[Any] = torch.manual_seed(a_ ) else: lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ : Tuple = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _UpperCamelCase ( self , a_ , a_=0 ): lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ) if str(a_ ).startswith("mps" ): 
lowerCamelCase_ : Tuple = torch.manual_seed(a_ ) else: lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ : int = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _UpperCamelCase ( self , a_ , a_=0 ): lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ) if str(a_ ).startswith("mps" ): lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ ) else: lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ : Union[str, Any] = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "numpy", } return inputs def _UpperCamelCase ( self ): if not hasattr(self.pipeline_class , "_optional_components" ): return lowerCamelCase_ : List[Any] = self.get_dummy_components() lowerCamelCase_ : int = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(a_ , a_ , a_ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowerCamelCase_ : int = self.get_dummy_inputs(a_ ) lowerCamelCase_ : int = pipe(**a_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(a_ ) lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ ) pipe_loaded.to(a_ ) pipe_loaded.set_progress_bar_config(disable=a_ ) for optional_component in pipe._optional_components: self.assertTrue( 
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ ) lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0] lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max() self.assertLess(a_ , 1E-4 ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = "cpu" lowerCamelCase_ : int = self.get_dummy_components() lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ ) lowerCamelCase_ : int = pipe.generate_mask(**a_ ) lowerCamelCase_ : List[Any] = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowerCamelCase_ : List[str] = np.array([0] * 9 ) lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_ , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = "cpu" lowerCamelCase_ : Union[str, Any] = self.get_dummy_components() lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ ) lowerCamelCase_ : Dict = pipe.invert(**a_ ).images lowerCamelCase_ : str = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCamelCase_ : Dict = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_ , 1E-3 ) def _UpperCamelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = "cpu" lowerCamelCase_ : int = self.get_dummy_components() lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"} lowerCamelCase_ : 
Optional[Any] = DPMSolverMultistepScheduler(**a_ ) lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ ) lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ ) lowerCamelCase_ : str = pipe.invert(**a_ ).images lowerCamelCase_ : int = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCamelCase_ : Union[str, Any] = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_ , 1E-3 ) @require_torch_gpu @slow class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _UpperCamelCase ( cls ): lowerCamelCase_ : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) ) lowerCamelCase_ : List[Any] = raw_image def _UpperCamelCase ( self ): lowerCamelCase_ : Dict = torch.manual_seed(0 ) lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa ) lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config ) lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : str = "a bowl of fruit" lowerCamelCase_ : Optional[int] = "a bowl of pears" lowerCamelCase_ : List[Any] = pipe.generate_mask( image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , ) lowerCamelCase_ : str = pipe.invert( prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents lowerCamelCase_ 
: List[str] = pipe( prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0] lowerCamelCase_ : List[str] = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 ) lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa ) lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ : Any = "a bowl of fruit" lowerCamelCase_ : Dict = "a bowl of pears" lowerCamelCase_ : Optional[Any] = pipe.generate_mask( image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , ) lowerCamelCase_ : str = pipe.invert( prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents lowerCamelCase_ : Any = pipe( prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0] lowerCamelCase_ : List[str] = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
73
1
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) __magic_name__ = logging.getLogger() def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Any = {} lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , "all_results.json") if os.path.exists(lowerCAmelCase_): with open(lowerCAmelCase_ , "r") as f: lowerCamelCase_ : List[Any] = json.load(lowerCAmelCase_) else: raise ValueError(F"""can't find {path}""") return results __magic_name__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def _UpperCamelCase ( self ): import xla_spawn lowerCamelCase_ : Optional[int] = self.get_auto_remove_tmp_dir() lowerCamelCase_ : List[Any] = F""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(a_ , "argv" , a_ ): lowerCamelCase_ : Union[str, Any] = time() xla_spawn.main() lowerCamelCase_ : Optional[Any] = time() lowerCamelCase_ : int = get_results(a_ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. 
self.assertLess(end - start , 500 ) def _UpperCamelCase ( self ): import xla_spawn lowerCamelCase_ : Optional[int] = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(a_ , "argv" , a_ ): xla_spawn.main()
73
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ): lowerCamelCase_ : int = ["a", "b", "c"] # Defaults to last layer if both are None lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ ) self.assertEqual(a_ , ["c"] ) self.assertEqual(a_ , [2] ) # Out indices set to match out features lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ ) self.assertEqual(a_ , ["a", "c"] ) self.assertEqual(a_ , [0, 2] ) # Out features set to match out indices lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ ) self.assertEqual(a_ , ["a", "c"] ) self.assertEqual(a_ , [0, 2] ) # Out features selected from negative indices lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ ) self.assertEqual(a_ , ["a", "c"] ) self.assertEqual(a_ , [-3, -1] ) def _UpperCamelCase ( self ): # Stage names must be set with self.assertRaises(a_ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ ) # Out features must be a list with self.assertRaises(a_ ): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] ) # Out features must be a subset of stage names with self.assertRaises(a_ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] ) # Out indices must be a list or tuple with self.assertRaises(a_ ): verify_out_features_out_indices(a_ , 0 , ["a", "b"] ) # Out indices must be a subset of stage names with self.assertRaises(a_ ): verify_out_features_out_indices(a_ , (0, 1) , ["a"] ) # Out features and out indices must be the same length with self.assertRaises(a_ ): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] ) # Out features should match out 
indices with self.assertRaises(a_ ): verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] ) # Out features and out indices should be in order with self.assertRaises(a_ ): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] ) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = BackboneMixin() lowerCamelCase_ : List[Any] = ["a", "b", "c"] lowerCamelCase_ : Optional[int] = ["a", "c"] lowerCamelCase_ : Dict = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly lowerCamelCase_ : Union[str, Any] = ["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"] ) self.assertEqual(backbone.out_indices , [0, 1] ) lowerCamelCase_ : str = [-3, -1] self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [-3, -1] )
73
1
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __magic_name__ = pytest.mark.integration @require_faiss class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(a_ ) for x in np.arange(30 ).tolist()]} ) return dset def _UpperCamelCase ( self ): import faiss lowerCamelCase_ : Dataset = self._create_dummy_dataset() lowerCamelCase_ : Dict = dset.map( lambda a_ , a_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=a_ , keep_in_memory=a_ ) lowerCamelCase_ : Optional[int] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCamelCase_ ,lowerCamelCase_ : str = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def _UpperCamelCase ( self ): import faiss lowerCamelCase_ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _UpperCamelCase ( self ): import faiss lowerCamelCase_ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _UpperCamelCase ( self ): lowerCamelCase_ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(a_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def _UpperCamelCase ( self ): from elasticsearch import Elasticsearch lowerCamelCase_ : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: lowerCamelCase_ : Optional[Any] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) lowerCamelCase_ : Optional[int] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} lowerCamelCase_ : Dict = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=a_ ) lowerCamelCase_ ,lowerCamelCase_ : str = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def _UpperCamelCase ( self ): import faiss lowerCamelCase_ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors 
index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query lowerCamelCase_ : Optional[int] = np.zeros(5 , dtype=np.floataa ) lowerCamelCase_ : Tuple = 1 lowerCamelCase_ ,lowerCamelCase_ : Tuple = index.search(a_ ) self.assertRaises(a_ , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries lowerCamelCase_ : int = np.eye(5 , dtype=np.floataa )[::-1] lowerCamelCase_ ,lowerCamelCase_ : Any = index.search_batch(a_ ) self.assertRaises(a_ , index.search_batch , queries[0] ) lowerCamelCase_ : int = [scores[0] for scores in total_scores] lowerCamelCase_ : Any = [indices[0] for indices in total_indices] self.assertGreater(np.min(a_ ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , a_ ) def _UpperCamelCase ( self ): import faiss lowerCamelCase_ : Tuple = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) lowerCamelCase_ : str = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(a_ ): lowerCamelCase_ : int = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def _UpperCamelCase ( self ): import faiss lowerCamelCase_ : Union[str, Any] = faiss.IndexFlat(5 ) lowerCamelCase_ : Optional[int] = FaissIndex(custom_index=a_ ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def _UpperCamelCase ( self ): import faiss lowerCamelCase_ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file: index.save(tmp_file.name ) lowerCamelCase_ : str = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowerCamelCase_ : Tuple = np.zeros(5 , dtype=np.floataa ) lowerCamelCase_ : Dict = 1 lowerCamelCase_ ,lowerCamelCase_ : Tuple = index.search(a_ ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' import faiss lowerCamelCase_ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5 , dtype=np.floataa)) lowerCamelCase_ : Optional[Any] = "index.faiss" lowerCamelCase_ : int = F"""mock://{index_name}""" index.save(lowerCAmelCase_ , storage_options=mockfs.storage_options) lowerCamelCase_ : str = FaissIndex.load(lowerCAmelCase_ , storage_options=mockfs.storage_options) lowerCamelCase_ : Any = np.zeros(5 , dtype=np.floataa) lowerCamelCase_ : Optional[int] = 1 lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = index.search(lowerCAmelCase_) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" def _UpperCamelCase ( self ): from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: lowerCamelCase_ : Optional[Any] = Elasticsearch() lowerCamelCase_ : Tuple = {"acknowledged": True} lowerCamelCase_ : List[str] = ElasticSearchIndex(es_client=a_ ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single 
query lowerCamelCase_ : List[str] = "foo" lowerCamelCase_ : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = index.search(a_ ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout lowerCamelCase_ : List[Any] = "foo" lowerCamelCase_ : List[str] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = index.search(a_ , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries lowerCamelCase_ : Union[str, Any] = ["foo", "bar", "foobar"] lowerCamelCase_ : Any = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} lowerCamelCase_ ,lowerCamelCase_ : str = index.search_batch(a_ ) lowerCamelCase_ : Optional[int] = [scores[0] for scores in total_scores] lowerCamelCase_ : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(a_ ) , 0 ) self.assertListEqual([1, 1, 1] , a_ ) # batched queries with timeout lowerCamelCase_ : int = ["foo", "bar", "foobar"] lowerCamelCase_ : int = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = index.search_batch(a_ , request_timeout=30 ) lowerCamelCase_ : Tuple = [scores[0] for scores in total_scores] lowerCamelCase_ : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(a_ ) , 0 ) self.assertListEqual([1, 1, 1] , a_ )
73
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class lowerCAmelCase__ ( unittest.TestCase ):
    """End-to-end tests for the `accelerate launch` CLI entry point.

    NOTE(review): every class attribute below is bound to the same name
    `__UpperCAmelCase` and every method to `_UpperCamelCase`, so each
    definition shadows the previous one, and the bodies read names
    (`mod_file`, `config_folder`, `cmd`, `a_`, ...) that are never bound
    here.  This looks like mechanical renaming of the original file --
    confirm against upstream before relying on runtime behaviour.
    """

    # File path of the installed accelerate.test_utils module.
    __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
    # Script the launcher tests execute (presumably scripts/test_cli.py next to test_utils).
    __UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    # Base command each test builds on.
    __UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
    # Default accelerate config location, plus a backup name used to park any
    # pre-existing user config while these tests run.
    __UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
    __UpperCAmelCase : int = '''default_config.yaml'''
    __UpperCAmelCase : Tuple = config_folder / config_file
    __UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
    # Directory of YAML configs exercised by the config-file test.
    __UpperCAmelCase : int = Path('''tests/test_configs''' )

    @classmethod
    def _UpperCamelCase ( cls ):
        """setUpClass-style hook: move any existing user config out of the way."""
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def _UpperCamelCase ( cls ):
        """tearDownClass-style hook: restore the user config moved aside above."""
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    def _UpperCamelCase ( self ):
        """Launch the test script with no config file; add --multi_gpu when >1 GPU is visible."""
        lowerCamelCase_ : List[Any] = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def _UpperCamelCase ( self ):
        """Launch the test script once per YAML config under tests/test_configs."""
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=a_ ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )

    def _UpperCamelCase ( self ):
        """Smoke-test the `accelerate test` sub-command."""
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )


class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for the `accelerate tpu-config` CLI.

    Each test runs the command with --debug (so nothing is actually sent to
    gcloud) and asserts on the gcloud invocation echoed to stdout.

    NOTE(review): this class reuses the name of the class above (shadowing it
    at module level), and the attribute/method name-collision caveat on that
    class applies here too.
    """

    # TPU coordinates and the remote command used throughout.
    __UpperCAmelCase : List[Any] = '''test-tpu'''
    __UpperCAmelCase : Tuple = '''us-central1-a'''
    __UpperCAmelCase : Tuple = '''ls'''
    __UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
    # Prefix the CLI prepends to every remote command in its debug output.
    __UpperCAmelCase : Dict = '''cd /usr/share'''
    __UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
    __UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''

    def _UpperCamelCase ( self ):
        """--command + explicit --tpu_zone/--tpu_name, no config file."""
        lowerCamelCase_ : Any = run_command(
            self.cmd + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """--command with the legacy 0.12.0 config file plus explicit TPU flags."""
        lowerCamelCase_ : Tuple = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """Latest config file only: commands come from the config itself."""
        lowerCamelCase_ : Union[str, Any] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """--command overrides the commands in the latest config file."""
        lowerCamelCase_ : Any = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """Multiple --command flags are joined with ';' in order."""
        lowerCamelCase_ : List[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """--command_file supplies the remote commands from a shell script."""
        lowerCamelCase_ : List[str] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """--command_file combined with the legacy config and explicit TPU flags."""
        lowerCamelCase_ : Dict = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """--install_accelerate prepends a `pip install accelerate -U` step."""
        lowerCamelCase_ : str = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        """--accelerate_version pins the installed version instead of -U."""
        lowerCamelCase_ : Any = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
73
1
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


# Conversion is inference-only; disable autograd globally.
torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    """Return (src, dest) pairs mapping MSN checkpoint tensor names to HF ViT names.

    When `base_model` is True the "vit." prefix is stripped (the target is a bare
    ViTMSNModel without a classification head).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection into separate query/key/value tensors in-place."""
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop classification-head tensors the bare HF model does not use."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    """Drop the MSN projection-head (fc/bn) tensors, which are unused after conversion."""
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an MSN checkpoint from `checkpoint_url`, convert it to a
    ViTMSNModel, verify a few logits against known values, and save the model
    and image processor to `pytorch_dump_folder_path`.
    """
    config = ViTMSNConfig()
    config.num_labels = 1000

    # ImageNet-1k label mapping.  NOTE: pass repo_type="dataset" -- the legacy
    # "datasets/<org>/<name>" repo_id form is rejected by current
    # huggingface_hub validation.
    filename = "imagenet-1k-id2label.json"
    with open(hf_hub_download("huggingface/label-files", filename, repo_type="dataset"), "r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Architecture hyper-parameters are encoded in the checkpoint URL; the
    # default ("b16") is the stock base config.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    # MSN checkpoints carry a projection head that the HF base model drops.
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    # Sanity-check the converted weights on a COCO image.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
73
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Unconditional latent-diffusion image pipeline: denoise random latents
    with a UNet under a scheduler, then decode them with a VQ-VAE.

    NOTE(review): parameters are uniformly named `a_` and locals
    `lowerCamelCase_` (mechanical renaming); the body reads names
    (`batch_size`, `latents`, `eta`, `image`, ...) that are never bound under
    these names -- confirm against the original implementation.
    """

    def __init__( self , a_ , a_ , a_ ):
        super().__init__()
        # Register the sub-models so saving/loading and device moves see them.
        # NOTE(review): as written, all three kwargs receive the same (last)
        # positional argument `a_` -- presumably vqvae/unet/scheduler originally.
        self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )

    @torch.no_grad()
    def __call__( self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
        """Run the full sampling loop and return the decoded image(s).

        Returns a one-element tuple when `return_dict` is falsy, otherwise an
        ImagePipelineOutput.
        """
        # Draw initial Gaussian latents in the UNet's expected sample shape.
        lowerCamelCase_ : Optional[Any] = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
        lowerCamelCase_ : Optional[int] = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        lowerCamelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(a_ )

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        lowerCamelCase_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )

        lowerCamelCase_ : Optional[int] = {}
        if accepts_eta:
            lowerCamelCase_ : Optional[int] = eta

        for t in self.progress_bar(self.scheduler.timesteps ):
            lowerCamelCase_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
            # predict the noise residual
            lowerCamelCase_ : Optional[Any] = self.unet(a_ , a_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            lowerCamelCase_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample

        # decode the image latents with the VAE
        lowerCamelCase_ : str = self.vqvae.decode(a_ ).sample

        # Map model output from [-1, 1] to [0, 1], then to channels-last numpy.
        lowerCamelCase_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        lowerCamelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowerCamelCase_ : Optional[Any] = self.numpy_to_pil(a_ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=a_ )
73
1
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): both module-level constants below are bound to the same name
# `__magic_name__`, so the archive map overwrites the logger, and the classes
# later reference a `logger` name that is never bound here.  This looks like
# mechanical renaming -- confirm against the original file.
__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the vision tower of a GIT model.

    NOTE(review): `__init__` assigns to throwaway locals named
    `lowerCamelCase_` while reading names (`hidden_size`, `patch_size`, ...)
    that are never bound -- presumably these were `self.hidden_size =
    hidden_size` etc. before renaming.
    """

    # model_type identifier used by the auto-config machinery.
    __UpperCAmelCase : List[str] = '''git_vision_model'''

    def __init__( self , a_=768 , a_=3072 , a_=12 , a_=12 , a_=3 , a_=224 , a_=16 , a_="quick_gelu" , a_=1E-5 , a_=0.0 , a_=0.02 , **a_ , ):
        super().__init__(**a_ )

        lowerCamelCase_ : Any = hidden_size
        lowerCamelCase_ : List[str] = intermediate_size
        lowerCamelCase_ : List[str] = num_hidden_layers
        lowerCamelCase_ : Dict = num_attention_heads
        lowerCamelCase_ : Tuple = num_channels
        lowerCamelCase_ : Union[str, Any] = patch_size
        lowerCamelCase_ : Optional[int] = image_size
        lowerCamelCase_ : List[str] = initializer_range
        lowerCamelCase_ : Optional[Any] = attention_dropout
        lowerCamelCase_ : Dict = layer_norm_eps
        lowerCamelCase_ : Optional[Any] = hidden_act

    @classmethod
    def _UpperCamelCase ( cls , a_ , **a_ ):
        """Load the vision sub-config, unwrapping it from a full GIT config
        when the pretrained name points at a composite "git" checkpoint."""
        cls._set_token_in_kwargs(a_ )

        lowerCamelCase_ ,lowerCamelCase_ : List[Any] = cls.get_config_dict(a_ , **a_ )

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            lowerCamelCase_ : Dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(a_ , **a_ )


class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the full GIT (GenerativeImage2Text) model.

    NOTE(review): this class reuses the name of the vision config above and
    so shadows it at module level; the assign-to-throwaway-local pattern
    described on that class applies here as well.
    """

    __UpperCAmelCase : str = '''git'''

    def __init__( self , a_=None , a_=3_0522 , a_=768 , a_=6 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1024 , a_=0.02 , a_=1E-12 , a_=0 , a_="absolute" , a_=True , a_=False , a_=101 , a_=102 , a_=None , **a_ , ):
        super().__init__(bos_token_id=a_ , eos_token_id=a_ , pad_token_id=a_ , **a_ )

        if vision_config is None:
            lowerCamelCase_ : Tuple = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )

        lowerCamelCase_ : List[Any] = GitVisionConfig(**a_ )
        lowerCamelCase_ : Optional[int] = vocab_size
        lowerCamelCase_ : List[str] = hidden_size
        lowerCamelCase_ : Union[str, Any] = num_hidden_layers
        lowerCamelCase_ : Union[str, Any] = num_attention_heads
        lowerCamelCase_ : Any = hidden_act
        lowerCamelCase_ : Optional[int] = intermediate_size
        lowerCamelCase_ : Tuple = hidden_dropout_prob
        lowerCamelCase_ : str = attention_probs_dropout_prob
        lowerCamelCase_ : int = max_position_embeddings
        lowerCamelCase_ : Union[str, Any] = initializer_range
        lowerCamelCase_ : List[Any] = layer_norm_eps
        lowerCamelCase_ : Optional[Any] = position_embedding_type
        lowerCamelCase_ : List[Any] = use_cache
        lowerCamelCase_ : str = tie_word_embeddings
        lowerCamelCase_ : Dict = num_image_with_embedding
        lowerCamelCase_ : List[Any] = bos_token_id
        lowerCamelCase_ : Optional[Any] = eos_token_id

    def _UpperCamelCase ( self ):
        """Serialize to a plain dict (the vision config nested as a dict)."""
        lowerCamelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
        lowerCamelCase_ : str = self.vision_config.to_dict()
        lowerCamelCase_ : Optional[int] = self.__class__.model_type
        return output
import re def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_): raise ValueError("Invalid Strand") return dna.translate(dna.maketrans("ATCG" , "TAGC")) if __name__ == "__main__": import doctest doctest.testmod()
73
1