Dataset columns (name, dtype, value range):

  code                      string   lengths 81 to 54k
  code_codestyle            int64    0 to 721
  style_context             string   lengths 91 to 41.9k
  style_context_codestyle   int64    0 to 699
  label                     int64    0 to 1
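The rows below are raw dumps of these columns. As a rough sketch (assuming the data is published as a Hugging Face dataset, and using a placeholder repository id since the real one is not given here), records with this schema could be loaded and inspected as follows:

```python
# Minimal sketch, not the actual loading code for this dataset.
# "example-org/code-style-pairs" is a placeholder id; substitute the real repository name.
from datasets import load_dataset

ds = load_dataset("example-org/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]))                # code: string, 81 to ~54k characters
print(row["code_codestyle"])           # code_codestyle: int64 in [0, 721]
print(len(row["style_context"]))       # style_context: string, 91 to ~41.9k characters
print(row["style_context_codestyle"])  # style_context_codestyle: int64 in [0, 699]
print(row["label"])                    # label: int64, 0 or 1
```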
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __A = True except ImportError: __A = False __A = logging.get_logger(__name__) # pylint: disable=invalid-name def _SCREAMING_SNAKE_CASE ( A : Namespace ) -> str: """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class a_ ( UpperCamelCase_ ): @staticmethod def SCREAMING_SNAKE_CASE__ (__a) -> Union[str, Any]: __snake_case : Optional[Any] = parser.add_parser('add-new-model') add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.') add_new_model_parser.add_argument('--testing_file' , type=__a , help='Configuration file on which to run.') add_new_model_parser.add_argument( '--path' , type=__a , help='Path to cookiecutter. Should only be used for testing purposes.') add_new_model_parser.set_defaults(func=__a) def __init__(self , __a , __a , __a=None , *__a) -> int: __snake_case : Optional[int] = testing __snake_case : List[str] = testing_file __snake_case : Optional[Any] = path def SCREAMING_SNAKE_CASE__ (self) -> Any: warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.') if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n') # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory __snake_case : Any = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:2_2]] if len(__a) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. 
' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.') __snake_case : Union[str, Any] = ( Path(__a).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent ) __snake_case : str = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(__a)) else: with open(self._testing_file , 'r') as configuration_file: __snake_case : List[Any] = json.load(__a) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path) , no_input=__a , extra_context=__a , ) __snake_case : Union[str, Any] = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:2_2]][0] # Retrieve configuration with open(directory + '/configuration.json' , 'r') as configuration_file: __snake_case : List[str] = json.load(__a) __snake_case : Optional[Any] = configuration['lowercase_modelname'] __snake_case : List[Any] = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F"""{directory}/configuration.json""") __snake_case : str = 'PyTorch' in generate_tensorflow_pytorch_and_flax __snake_case : str = 'TensorFlow' in generate_tensorflow_pytorch_and_flax __snake_case : Any = 'Flax' in generate_tensorflow_pytorch_and_flax __snake_case : Optional[Any] = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}""" os.makedirs(__a , exist_ok=__a) os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__a) # Tests require submodules as they have parent imports with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w'): pass shutil.move( F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , ) shutil.move( F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , ) def remove_copy_lines(__a): with open(__a , 'r') as f: __snake_case : Optional[int] = f.readlines() with open(__a , 'w') as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(__a) if output_pytorch: if not self._testing: remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""") shutil.move( F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , ) else: os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""") os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""") if output_tensorflow: if not self._testing: remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""") shutil.move( F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , ) else: os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""") os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""") if output_flax: if not self._testing: remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""") shutil.move( F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , ) else: os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""") os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""") shutil.move( F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , ) shutil.move( F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(__a , __a , __a): # Create temp file __snake_case : Any = mkstemp() __snake_case : Any = False with fdopen(__a , 'w') as new_file: with open(__a) as old_file: for line in old_file: new_file.write(__a) if line_to_copy_below in line: __snake_case : Union[str, Any] = True for line_to_copy in lines_to_copy: new_file.write(__a) if not line_found: raise ValueError(F"""Line {line_to_copy_below} was not found in file.""") # Copy the file permissions from the old file to the new file copymode(__a , __a) # Remove original file remove(__a) # Move new file move(__a , __a) def skip_units(__a): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(__a): with open(__a) as datafile: __snake_case : Optional[Any] = [] __snake_case : Optional[int] = False __snake_case : List[Any] = False for line in datafile: if "# To replace in: " in line and "##" not in line: __snake_case : Optional[int] = line.split('"')[1] __snake_case : Dict = skip_units(__a) elif "# Below: " in line and "##" not in line: __snake_case : Optional[Any] = line.split('"')[1] __snake_case : Union[str, Any] = 
skip_units(__a) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(__a , __a , __a) __snake_case : List[Any] = [] elif "# Replace with" in line and "##" not in line: __snake_case : List[str] = [] elif "##" not in line: lines_to_copy.append(__a) remove(__a) replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""") os.rmdir(__a)
709
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
0
import re from filelock import FileLock try: import nltk __A = True except (ImportError, ModuleNotFoundError): __A = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def _SCREAMING_SNAKE_CASE ( A : str ) -> str: """simple docstring""" re.sub('<n>' , '' , A ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(A ) )
710
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
61
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list[int] , A : list[int] , A : int ) -> bool: """simple docstring""" return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(A ) ) def _SCREAMING_SNAKE_CASE ( A : list[list[int]] , A : int , A : list[int] , A : int ) -> bool: """simple docstring""" # Base Case if index == len(A ): return True # Recursive Step for i in range(A ): if valid_coloring(graph[index] , A , A ): # Color current vertex __snake_case : int = i # Validate coloring if util_color(A , A , A , index + 1 ): return True # Backtrack __snake_case : Optional[int] = -1 return False def _SCREAMING_SNAKE_CASE ( A : list[list[int]] , A : int ) -> list[int]: """simple docstring""" __snake_case : Union[str, Any] = [-1] * len(A ) if util_color(A , A , A , 0 ): return colored_vertices return []
711
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
0
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __A = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Any = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=A , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=A , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=A , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=A , default='data/dump' , help='The dump file prefix.' ) __snake_case : Tuple = parser.parse_args() logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" ) if args.tokenizer_type == "bert": __snake_case : List[Any] = BertTokenizer.from_pretrained(args.tokenizer_name ) __snake_case : Optional[int] = tokenizer.special_tokens_map['cls_token'] # `[CLS]` __snake_case : Union[str, Any] = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": __snake_case : str = RobertaTokenizer.from_pretrained(args.tokenizer_name ) __snake_case : Any = tokenizer.special_tokens_map['cls_token'] # `<s>` __snake_case : Optional[Any] = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": __snake_case : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name ) __snake_case : List[str] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` __snake_case : Optional[int] = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(F"""Loading text from {args.file_path}""" ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: __snake_case : str = fp.readlines() logger.info('Start encoding' ) logger.info(F"""{len(A )} examples to process.""" ) __snake_case : int = [] __snake_case : Union[str, Any] = 0 __snake_case : Dict = 1_00_00 __snake_case : int = time.time() for text in data: __snake_case : Optional[Any] = F"""{bos} {text.strip()} {sep}""" __snake_case : Optional[int] = tokenizer.encode(A , add_special_tokens=A ) rslt.append(A ) iter += 1 if iter % interval == 0: __snake_case : Optional[int] = time.time() logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" ) __snake_case : str = time.time() logger.info('Finished binarization' ) logger.info(F"""{len(A )} examples processed.""" ) __snake_case : int = F"""{args.dump_file}.{args.tokenizer_name}.pickle""" __snake_case : Tuple = tokenizer.vocab_size if vocab_size < (1 << 16): __snake_case : Optional[Any] = [np.uintaa(A ) for d in rslt] else: __snake_case : Any = [np.intaa(A ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"""Dump to {dp_file}""" ) with open(A , 'wb' ) as handle: pickle.dump(rslt_ , A , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
712
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = 
align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
0
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class a_ ( UpperCamelCase_ ): def __get__(self , __a , __a=None) -> int: """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute') __snake_case : List[Any] = '__cached_' + self.fget.__name__ __snake_case : Tuple = getattr(__a , __a , __a) if cached is None: __snake_case : List[Any] = self.fget(__a) setattr(__a , __a , __a) return cached def _SCREAMING_SNAKE_CASE ( A : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case : Dict = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def _SCREAMING_SNAKE_CASE ( A : str ) -> Union[str, Any]: """simple docstring""" if is_torch_fx_proxy(A ): return True if is_torch_available(): import torch if isinstance(A , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(A , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(A , (jnp.ndarray, Tracer) ): return True return isinstance(A , np.ndarray ) def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] ) -> Dict: """simple docstring""" return isinstance(A , np.ndarray ) def _SCREAMING_SNAKE_CASE ( A : str ) -> Any: """simple docstring""" return _is_numpy(A ) def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> Tuple: """simple docstring""" import torch return isinstance(A , torch.Tensor ) def _SCREAMING_SNAKE_CASE ( A : Dict ) -> Union[str, Any]: """simple docstring""" return False if not is_torch_available() else _is_torch(A ) def _SCREAMING_SNAKE_CASE ( A : Any ) -> str: """simple docstring""" import torch return isinstance(A , torch.device ) def _SCREAMING_SNAKE_CASE ( A : Dict ) -> Optional[int]: """simple docstring""" return False if not is_torch_available() else _is_torch_device(A ) def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> Any: """simple docstring""" import torch if isinstance(A , A ): if hasattr(A , A ): __snake_case : List[str] = getattr(A , A ) else: return False return isinstance(A , torch.dtype ) def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> List[str]: """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(A ) def _SCREAMING_SNAKE_CASE ( A : Dict ) -> Any: """simple docstring""" import tensorflow as tf return isinstance(A , tf.Tensor ) def _SCREAMING_SNAKE_CASE ( A : Dict ) -> Optional[int]: """simple docstring""" return False if not is_tf_available() else _is_tensorflow(A ) def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> int: """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(A , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(A ) return type(A ) == tf.Tensor def _SCREAMING_SNAKE_CASE ( A : List[str] ) -> Dict: """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(A ) def _SCREAMING_SNAKE_CASE ( A : List[str] ) -> Any: """simple docstring""" import jax.numpy as jnp # noqa: F811 return 
isinstance(A , jnp.ndarray ) def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return False if not is_flax_available() else _is_jax(A ) def _SCREAMING_SNAKE_CASE ( A : List[Any] ) -> Union[str, Any]: """simple docstring""" if isinstance(A , (dict, UserDict) ): return {k: to_py_obj(A ) for k, v in obj.items()} elif isinstance(A , (list, tuple) ): return [to_py_obj(A ) for o in obj] elif is_tf_tensor(A ): return obj.numpy().tolist() elif is_torch_tensor(A ): return obj.detach().cpu().tolist() elif is_jax_tensor(A ): return np.asarray(A ).tolist() elif isinstance(A , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def _SCREAMING_SNAKE_CASE ( A : List[str] ) -> Any: """simple docstring""" if isinstance(A , (dict, UserDict) ): return {k: to_numpy(A ) for k, v in obj.items()} elif isinstance(A , (list, tuple) ): return np.array(A ) elif is_tf_tensor(A ): return obj.numpy() elif is_torch_tensor(A ): return obj.detach().cpu().numpy() elif is_jax_tensor(A ): return np.asarray(A ) else: return obj class a_ ( UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : Dict = fields(self) # Safety and consistency checks if not len(__a): raise ValueError(F"""{self.__class__.__name__} has no fields.""") if not all(field.default is None for field in class_fields[1:]): raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""") __snake_case : Dict = getattr(self , class_fields[0].name) __snake_case : int = all(getattr(self , field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(__a): if isinstance(__a , __a): __snake_case : Tuple = first_field.items() __snake_case : List[Any] = True else: try: __snake_case : int = iter(__a) __snake_case : Union[str, Any] = True except TypeError: __snake_case : int = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__a): if ( not isinstance(__a , (list, tuple)) or not len(__a) == 2 or not isinstance(element[0] , __a) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute __snake_case : str = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""") break setattr(self , element[0] , element[1]) if element[1] is not None: __snake_case : Optional[Any] = element[1] elif first_field is not None: __snake_case : str = first_field else: for field in class_fields: __snake_case : Optional[int] = getattr(self , field.name) if v is not None: __snake_case : Tuple = v def __delitem__(self , *__a , **__a) -> Optional[Any]: """simple docstring""" raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""") def SCREAMING_SNAKE_CASE__ (self , *__a , **__a) -> Optional[Any]: """simple docstring""" raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""") def SCREAMING_SNAKE_CASE__ (self , *__a , **__a) -> List[str]: """simple docstring""" raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""") def SCREAMING_SNAKE_CASE__ (self , *__a , **__a) -> Union[str, Any]: """simple docstring""" raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""") def __getitem__(self , __a) -> Optional[Any]: """simple docstring""" if isinstance(__a , __a): __snake_case : Tuple = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self , __a , __a) -> Optional[Any]: """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__a , __a) super().__setattr__(__a , __a) def __setitem__(self , __a , __a) -> Dict: """simple docstring""" super().__setitem__(__a , __a) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__a , __a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple[Any]: """simple docstring""" return tuple(self[k] for k in self.keys()) class a_ ( UpperCamelCase_ , UpperCamelCase_ ): @classmethod def SCREAMING_SNAKE_CASE__ (cls , __a) -> List[str]: """simple docstring""" raise ValueError( F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}""") class a_ ( UpperCamelCase_ ): _snake_case = """longest""" _snake_case = """max_length""" _snake_case = """do_not_pad""" class a_ ( UpperCamelCase_ ): _snake_case = """pt""" _snake_case = """tf""" _snake_case = """np""" _snake_case = """jax""" class a_ : def __init__(self , __a) -> Union[str, Any]: """simple docstring""" __snake_case : Union[str, Any] = context_managers __snake_case : Tuple = ExitStack() def __enter__(self) -> Any: """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(__a) def __exit__(self , *__a , **__a) -> int: """simple docstring""" self.stack.__exit__(*__a , **__a) def _SCREAMING_SNAKE_CASE ( A : List[Any] ) -> Tuple: """simple docstring""" __snake_case : Tuple = infer_framework(A ) if framework == "tf": __snake_case : int = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __snake_case : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: __snake_case : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def _SCREAMING_SNAKE_CASE ( A : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Dict = model_class.__name__ __snake_case : Dict = infer_framework(A ) if framework == "tf": __snake_case : Optional[int] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __snake_case : int = 
inspect.signature(model_class.forward ) # PyTorch models else: __snake_case : Union[str, Any] = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def _SCREAMING_SNAKE_CASE ( A : MutableMapping , A : str = "" , A : str = "." ) -> Any: """simple docstring""" def _flatten_dict(A : int , A : Union[str, Any]="" , A : List[str]="." ): for k, v in d.items(): __snake_case : Dict = str(A ) + delimiter + str(A ) if parent_key else k if v and isinstance(A , A ): yield from flatten_dict(A , A , delimiter=A ).items() else: yield key, v return dict(_flatten_dict(A , A , A ) ) @contextmanager def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : bool = False ) -> Union[str, Any]: """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def _SCREAMING_SNAKE_CASE ( A : Dict , A : Optional[int]=None ) -> List[Any]: """simple docstring""" if is_numpy_array(A ): return np.transpose(A , axes=A ) elif is_torch_tensor(A ): return array.T if axes is None else array.permute(*A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.transpose(A , perm=A ) elif is_jax_tensor(A ): return jnp.transpose(A , axes=A ) else: raise ValueError(F"""Type not supported for transpose: {type(A )}.""" ) def _SCREAMING_SNAKE_CASE ( A : str , A : List[Any] ) -> List[str]: """simple docstring""" if is_numpy_array(A ): return np.reshape(A , A ) elif is_torch_tensor(A ): return array.reshape(*A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.reshape(A , A ) elif is_jax_tensor(A ): return jnp.reshape(A , A ) else: raise ValueError(F"""Type not supported for reshape: {type(A )}.""" ) def _SCREAMING_SNAKE_CASE ( A : Any , A : List[Any]=None ) -> Union[str, Any]: """simple docstring""" if is_numpy_array(A ): return np.squeeze(A , axis=A ) elif is_torch_tensor(A ): return array.squeeze() if axis is None else array.squeeze(dim=A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.squeeze(A , axis=A ) elif is_jax_tensor(A ): return jnp.squeeze(A , axis=A ) else: raise ValueError(F"""Type not supported for squeeze: {type(A )}.""" ) def _SCREAMING_SNAKE_CASE ( A : Dict , A : Optional[int] ) -> int: """simple docstring""" if is_numpy_array(A ): return np.expand_dims(A , A ) elif is_torch_tensor(A ): return array.unsqueeze(dim=A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.expand_dims(A , axis=A ) elif is_jax_tensor(A ): return jnp.expand_dims(A , axis=A ) else: raise ValueError(F"""Type not supported for expand_dims: {type(A )}.""" ) def _SCREAMING_SNAKE_CASE ( A : List[Any] ) -> Optional[int]: """simple docstring""" if is_numpy_array(A ): return np.size(A ) elif is_torch_tensor(A ): return array.numel() elif is_tf_tensor(A ): import tensorflow as tf return tf.size(A ) elif is_jax_tensor(A ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(A )}.""" ) def _SCREAMING_SNAKE_CASE ( A : int , A : Dict ) -> Optional[Any]: """simple docstring""" for key, value in auto_map.items(): if isinstance(A , (tuple, list) ): __snake_case : int = [F"""{repo_id}--{v}""" if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: __snake_case : Union[str, Any] = F"""{repo_id}--{value}""" return auto_map def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> Tuple: """simple 
docstring""" for base_class in inspect.getmro(A ): __snake_case : Union[str, Any] = base_class.__module__ __snake_case : str = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
713
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
61
0
'''simple docstring''' import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Any , A : Tuple ) -> Any: __snake_case : Dict = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, oder?', } # BLUE scores as follows: # "pair": [fairseq, transformers] __snake_case : int = { 'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'], 'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'], 'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'], 'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'], } __snake_case : int = F"""{src_lang}-{tgt_lang}""" __snake_case : List[str] = F""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR's WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) """ os.makedirs(A , exist_ok=A ) __snake_case : Union[str, Any] = os.path.join(A , 'README.md' ) print(F"""Generating {path}""" ) with open(A , 'w' , encoding='utf-8' ) as f: f.write(A ) # make sure we are under the root of the project __A = Path(__file__).resolve().parent.parent.parent __A = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __A , __A , __A = model_name.split('''-''') __A = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
714
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger() def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A ) else: __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A ) if hidden_sizes == 1_92: __snake_case : int = timm.create_model('levit_192' , pretrained=A ) if hidden_sizes == 2_56: __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A ) if hidden_sizes == 3_84: __snake_case : int = timm.create_model('levit_384' , pretrained=A ) from_model.eval() __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval() __snake_case : int = OrderedDict() __snake_case : Optional[Any] = from_model.state_dict() __snake_case : Tuple = list(from_model.state_dict().keys() ) __snake_case : List[str] = list(our_model.state_dict().keys() ) print(len(A ) , len(A ) ) for i in range(len(A ) ): __snake_case : Optional[int] = weights[og_keys[i]] our_model.load_state_dict(A ) __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) ) __snake_case : Union[str, Any] = from_model(A ) __snake_case : List[str] = our_model(A ).logits assert torch.allclose(A , A ), "The model logits don't match the original one." __snake_case : int = name print(A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __snake_case : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = 'imagenet-1k-id2label.json' __snake_case : Tuple = 10_00 __snake_case : Dict = (1, num_labels) __snake_case : List[str] = 'huggingface/label-files' __snake_case : Any = num_labels __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A ) __snake_case : Dict = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } __snake_case : Union[str, Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( 
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A ) return config, expected_shape if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __A = parser.parse_args() __A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __A = logging.get_logger(__name__) __A = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __A = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } __A = { '''squeezebert/squeezebert-uncased''': 5_1_2, '''squeezebert/squeezebert-mnli''': 5_1_2, '''squeezebert/squeezebert-mnli-headless''': 5_1_2, } __A = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class a_ ( UpperCamelCase_ ): _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_INIT_CONFIGURATION _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = SqueezeBertTokenizer def __init__(self , __a=None , __a=None , __a=True , __a="[UNK]" , __a="[SEP]" , __a="[PAD]" , __a="[CLS]" , __a="[MASK]" , __a=True , __a=None , **__a , ) -> List[str]: """simple docstring""" super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , ) __snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('lowercase' , __a) != do_lower_case or normalizer_state.get('strip_accents' , __a) != strip_accents or normalizer_state.get('handle_chinese_chars' , __a) != tokenize_chinese_chars ): __snake_case : Optional[int] = getattr(__a , normalizer_state.pop('type')) __snake_case : List[str] = do_lower_case __snake_case : Union[str, Any] = strip_accents __snake_case : Union[str, Any] = tokenize_chinese_chars __snake_case : Union[str, Any] = normalizer_class(**__a) __snake_case : Tuple = do_lower_case def SCREAMING_SNAKE_CASE__ (self , __a , __a=None) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE__ (self , __a , __a = None) -> List[int]: """simple docstring""" __snake_case : List[str] = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE__ (self , __a , __a = None) -> Tuple[str]: """simple docstring""" __snake_case : Optional[Any] = 
self._tokenizer.model.save(__a , name=__a) return tuple(__a)
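A short usage sketch for the fast tokenizer defined above, loading one of the checkpoints listed in its pretrained maps (requires network access to the Hub; AutoTokenizer should resolve to the fast class defined here for these checkpoints):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
encoded = tokenizer("The premise sentence.", "The hypothesis sentence.")
print(encoded["input_ids"][:6])
# token_type_ids are 0 for the first segment (including [CLS]/[SEP]) and 1 for the second,
# matching create_token_type_ids_from_sequences above.
print(encoded["token_type_ids"])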
715
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride __snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: 
"""simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) 
self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else 
outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
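The slow integration tests above translate into a short standalone sketch (needs TensorFlow, Pillow and Hub access; the image path is the same test fixture used above):

from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
print(logits.shape)  # (1, 1000), as asserted in the integration test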
61
0
'''Project Euler problem 46: Goldbach's other conjecture.'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimisation."""
    if 1 < number < 4:  # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2 * i**2."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that violates the conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
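A quick check of the search above (names as restored in the snippet): 5777 and 5993 are the only odd composites known to violate Goldbach's other conjecture, so

print(compute_nums(1))  # [5777]
print(compute_nums(2))  # [5777, 5993]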
716
'''Project Euler problem 30: Digit fifth powers.'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number for number in range(1000, 1000000) if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
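As a worked check for the sum above: 4150 qualifies because 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150, so

print(digits_fifth_powers_sum(4150))  # 4150, hence 4150 is counted by solution()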
61
0
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) set_seed(7_7_0) __A = { '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } __A = { '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } __A = os.path.dirname(os.path.abspath(__file__)) __A = os.path.join(os.path.expanduser('''~'''), '''.cache''') __A = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : Dict=False ) -> Any: """simple docstring""" __snake_case : Any = model_type if use_small: key += "_small" return os.path.join(A , REMOTE_MODEL_PATHS[key]['file_name'] ) def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : Any ) -> List[Any]: """simple docstring""" os.makedirs(A , exist_ok=A ) hf_hub_download(repo_id=A , filename=A , local_dir=A ) def _SCREAMING_SNAKE_CASE ( A : Dict , A : int , A : Dict=False , A : Dict="text" ) -> str: """simple docstring""" if model_type == "text": __snake_case : List[Any] = BarkSemanticModel __snake_case : int = BarkSemanticConfig __snake_case : Any = BarkSemanticGenerationConfig elif model_type == "coarse": __snake_case : Union[str, Any] = BarkCoarseModel __snake_case : Optional[int] = BarkCoarseConfig __snake_case : str = BarkCoarseGenerationConfig elif model_type == "fine": __snake_case : Dict = BarkFineModel __snake_case : Optional[Any] = BarkFineConfig __snake_case : str = BarkFineGenerationConfig else: raise NotImplementedError() __snake_case : Any = F"""{model_type}_small""" if use_small else model_type __snake_case : List[Any] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(A ): logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" ) _download(model_info['repo_id'] , model_info['file_name'] ) __snake_case : List[Any] = torch.load(A , map_location=A ) # this is a hack __snake_case : Optional[Any] = checkpoint['model_args'] if "input_vocab_size" not in model_args: __snake_case : Optional[Any] = model_args['vocab_size'] __snake_case : Optional[Any] = model_args['vocab_size'] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments 
__snake_case : Any = model_args.pop('n_head' ) __snake_case : Tuple = model_args.pop('n_embd' ) __snake_case : int = model_args.pop('n_layer' ) __snake_case : str = ConfigClass(**checkpoint['model_args'] ) __snake_case : List[str] = ModelClass(config=A ) __snake_case : List[Any] = GenerationConfigClass() __snake_case : Optional[Any] = model_generation_config __snake_case : int = checkpoint['model'] # fixup checkpoint __snake_case : Optional[Any] = '_orig_mod.' for k, v in list(state_dict.items() ): if k.startswith(A ): # replace part of the key with corresponding layer name in HF implementation __snake_case : Union[str, Any] = k[len(A ) :] for old_layer_name in new_layer_name_dict: __snake_case : List[Any] = new_k.replace(A , new_layer_name_dict[old_layer_name] ) __snake_case : Optional[int] = state_dict.pop(A ) __snake_case : Union[str, Any] = set(state_dict.keys() ) - set(model.state_dict().keys() ) __snake_case : List[str] = {k for k in extra_keys if not k.endswith('.attn.bias' )} __snake_case : Union[str, Any] = set(model.state_dict().keys() ) - set(state_dict.keys() ) __snake_case : Optional[Any] = {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(A ) != 0: raise ValueError(F"""extra keys found: {extra_keys}""" ) if len(A ) != 0: raise ValueError(F"""missing keys: {missing_keys}""" ) model.load_state_dict(A , strict=A ) __snake_case : int = model.num_parameters(exclude_embeddings=A ) __snake_case : List[Any] = checkpoint['best_val_loss'].item() logger.info(F"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(A , 3 )} loss""" ) model.eval() model.to(A ) del checkpoint, state_dict return model def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Optional[int]=False , A : Union[str, Any]="text" ) -> Dict: """simple docstring""" if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() __snake_case : List[str] = 'cpu' # do conversion on cpu __snake_case : str = _get_ckpt_path(A , use_small=A ) __snake_case : List[Any] = _load_model(A , A , model_type=A , use_small=A ) # load bark initial model __snake_case : Optional[int] = _bark_load_model(A , 'cpu' , model_type=A , use_small=A ) if model_type == "text": __snake_case : str = bark_model['model'] if model.num_parameters(exclude_embeddings=A ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model __snake_case : Optional[Any] = 5 __snake_case : Optional[Any] = 10 if model_type in ["text", "coarse"]: __snake_case : Any = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int ) __snake_case : Union[str, Any] = bark_model(A )[0] __snake_case : Optional[int] = model(A ) # take last logits __snake_case : List[Any] = output_new_model_total.logits[:, [-1], :] else: __snake_case : Tuple = 3 __snake_case : List[str] = 8 __snake_case : str = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) __snake_case : Optional[Any] = model(A , A ) __snake_case : Optional[Any] = bark_model(A , A ) __snake_case : List[str] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('initial and new outputs are not equal' ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) def _SCREAMING_SNAKE_CASE ( 
A : List[str] , A : Dict , A : Optional[int] , A : int , A : List[str] , A : str , ) -> int: """simple docstring""" __snake_case : Tuple = os.path.join(A , A ) __snake_case : List[Any] = BarkSemanticConfig.from_pretrained(os.path.join(A , 'config.json' ) ) __snake_case : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(A , 'config.json' ) ) __snake_case : Any = BarkFineConfig.from_pretrained(os.path.join(A , 'config.json' ) ) __snake_case : Union[str, Any] = EncodecConfig.from_pretrained('facebook/encodec_24khz' ) __snake_case : List[str] = BarkSemanticModel.from_pretrained(A ) __snake_case : Tuple = BarkCoarseModel.from_pretrained(A ) __snake_case : Optional[Any] = BarkFineModel.from_pretrained(A ) __snake_case : Optional[int] = EncodecModel.from_pretrained('facebook/encodec_24khz' ) __snake_case : List[Any] = BarkConfig.from_sub_model_configs( A , A , A , A ) __snake_case : Optional[int] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) __snake_case : Union[str, Any] = BarkModel(A ) __snake_case : List[Any] = semantic __snake_case : int = coarseAcoustic __snake_case : Union[str, Any] = fineAcoustic __snake_case : Any = codec __snake_case : Dict = bark_generation_config Path(A ).mkdir(exist_ok=A ) bark.save_pretrained(A , repo_id=A , push_to_hub=A ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') __A = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
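A hypothetical way to run a single sub-model conversion from Python rather than the CLI entry point just above; the output folder is a placeholder, and the suno bark package plus Hub access are required:

# Converts the small semantic ("text") checkpoint and saves the HF model into the folder.
load_model("bark-text-dump", model_type="text", use_small=True)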
717
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a_ : _snake_case = 42 _snake_case = None _snake_case = None def _SCREAMING_SNAKE_CASE ( ) -> Node | None: """simple docstring""" __snake_case : str = Node(1 ) __snake_case : Tuple = Node(2 ) __snake_case : Optional[int] = Node(3 ) __snake_case : List[str] = Node(4 ) __snake_case : List[str] = Node(5 ) return tree def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] if root is None: return output __snake_case : Optional[int] = deque([root] ) while process_queue: __snake_case : List[str] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] __snake_case : list[Sequence[Node | None]] = [] __snake_case : List[Any] = 0 __snake_case : int = height(A ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(A , A ) ) __snake_case : int = 1 else: output.append(get_nodes_from_right_to_left(A , A ) ) __snake_case : Tuple = 0 return output def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing. """simple docstring""" __snake_case : Optional[int] = make_tree() print(F"""In-order Traversal: {inorder(A )}""" ) print(F"""Pre-order Traversal: {preorder(A )}""" ) print(F"""Post-order Traversal: {postorder(A )}""" , '\n' ) print(F"""Height of Tree: {height(A )}""" , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(A ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(A ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(A , level=A ) ) print('\nZigZag order Traversal: ' ) print(zigzag(A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
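A small usage sketch for the traversal helpers above, building the tree by hand. This assumes the node dataclass is exported as Node, which is the name its own annotations and make_tree use:

root = Node(1, Node(2, Node(4), Node(5)), Node(3))
print(level_order(root))  # [1, 2, 3, 4, 5]
print(zigzag(root))       # [[1], [3, 2], [4, 5]] - alternating left-to-right / right-to-left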
61
0
'''Check whether every character in a string is unique, using a bitmap of seen code points.'''


def is_unique(input_str: str) -> bool:
    """Return True if no character occurs more than once in `input_str`."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code point
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
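Example usage of the bitmap check above (the name is_unique is the restored/assumed function name from the cleaned-up definition):

print(is_unique("python"))  # True  - no character repeats
print(is_unique("letter"))  # False - 'e' and 't' repeat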
718
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int 
= w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] 
elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
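A brief usage sketch for the linear-algebra module above, assuming the two classes are exported as Vector and Matrix (the names the module-level helper functions refer to):

v = Vector([1, 2, 3])
w = Vector([4, 5, 6])
print(v * w)            # 32, the dot product
print(v + w)            # (5,7,9)

m = Matrix([[1, 2], [3, 4]], 2, 2)
print(m.determinant())  # -2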
61
0
'''simple docstring''' import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = PhobertTokenizer _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : Optional[int] = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@'] __snake_case : Optional[int] = dict(zip(__a , range(len(__a)))) __snake_case : Any = ['#version: 0.2', 'l à</w>'] __snake_case : Tuple = {'unk_token': '<unk>'} __snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""") with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(__a)) def SCREAMING_SNAKE_CASE__ (self , **__a) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map) return PhobertTokenizer.from_pretrained(self.tmpdirname , **__a) def SCREAMING_SNAKE_CASE__ (self , __a) -> Any: """simple docstring""" __snake_case : List[str] = 'Tôi là VinAI Research' __snake_case : Optional[int] = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>' return input_text, output_text def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : str = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) __snake_case : int = 'Tôi là VinAI Research' __snake_case : Optional[Any] = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split() __snake_case : str = tokenizer.tokenize(__a) print(__a) self.assertListEqual(__a , __a) __snake_case : List[Any] = tokens + [tokenizer.unk_token] __snake_case : List[str] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , __a)
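For completeness, outside of this unit test the tokenizer is normally loaded from a published checkpoint rather than the toy vocab built above; a hedged sketch (checkpoint name assumed from the public PhoBERT release, network required):

from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
print(tokenizer.tokenize("Tôi là sinh viên"))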
719
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification __A = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co __A = '''main''' # Default branch name __A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) __A = '''aaaaaaa''' # This commit does not exist, so we should 404. __A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes __A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" print('Bonjour!' ) yield print('Au revoir!' ) class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers') is not None class a_ ( unittest.TestCase ): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" with ContextManagers([]): print('Transformers are awesome!') # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" with ContextManagers([context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" with ContextManagers([context_fr(), context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n') @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_tf def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , 
['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_flax def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , [])
61
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { '''post_extract_proj''': '''feature_projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.upsample.0''': '''encoder.upsample.projection''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Tuple , A : Union[str, Any] , A : List[str] , A : Any ) -> Any: """simple docstring""" for attribute in key.split('.' ): __snake_case : Optional[int] = getattr(A , A ) if weight_type is not None: __snake_case : Any = getattr(A , A ).shape else: __snake_case : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : Optional[Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : List[Any] = value elif weight_type == "bias": __snake_case : List[str] = value else: __snake_case : List[Any] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : List[str] , A : Dict ) -> int: """simple docstring""" __snake_case : Optional[Any] = [] __snake_case : int = fairseq_model.state_dict() __snake_case : Optional[int] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) __snake_case : List[Any] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Tuple = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __snake_case : List[str] = True if "*" in mapped_key: __snake_case : int = name.split(A )[0].split('.' 
)[-2] __snake_case : List[str] = mapped_key.replace('*' , A ) if "weight_g" in name: __snake_case : str = 'weight_g' elif "weight_v" in name: __snake_case : Tuple = 'weight_v' elif "weight" in name: __snake_case : Optional[int] = 'weight' elif "bias" in name: __snake_case : str = 'bias' else: __snake_case : List[str] = None set_recursively(A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Optional[int] , A : List[str] , A : int , A : int ) -> Dict: """simple docstring""" __snake_case : Tuple = full_name.split('conv_layers.' )[-1] __snake_case : Optional[int] = name.split('.' ) __snake_case : str = int(items[0] ) __snake_case : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __snake_case : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(A ) def _SCREAMING_SNAKE_CASE ( A : Dict , A : int ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = SEWConfig() if is_finetuned: __snake_case : List[str] = model.wav_encoder.wav_model.cfg else: __snake_case : Dict = model.cfg __snake_case : Dict = fs_config.conv_bias __snake_case : Tuple = eval(fs_config.conv_feature_layers ) __snake_case : Optional[Any] = [x[0] for x in conv_layers] __snake_case : List[Any] = [x[1] for x in conv_layers] __snake_case : str = [x[2] for x in conv_layers] __snake_case : Tuple = 'gelu' __snake_case : Union[str, Any] = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group' __snake_case : List[Any] = 0.0 __snake_case : Tuple = fs_config.activation_fn.name __snake_case : List[Any] = fs_config.encoder_embed_dim __snake_case : Tuple = 0.02 __snake_case : List[Any] = fs_config.encoder_ffn_embed_dim __snake_case : Optional[Any] = 1e-5 __snake_case : Optional[Any] = fs_config.encoder_layerdrop __snake_case : Optional[Any] = fs_config.encoder_attention_heads __snake_case : Union[str, Any] = fs_config.conv_pos_groups __snake_case : Any = fs_config.conv_pos __snake_case : Any = len(A ) __snake_case : str = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : int = model.cfg __snake_case : Dict = fs_config.final_dropout __snake_case : List[str] = fs_config.layerdrop __snake_case : Union[str, Any] = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : Tuple = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : int = fs_config.mask_channel_length __snake_case : Optional[Any] = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : Dict = fs_config.mask_prob __snake_case : Tuple = 'Wav2Vec2FeatureExtractor' __snake_case : List[str] = 'Wav2Vec2CTCTokenizer' return config @torch.no_grad() def _SCREAMING_SNAKE_CASE ( A : int , A : List[str] , A : List[str]=None , A : List[Any]=None , A : Optional[int]=True ) -> List[Any]: """simple docstring""" if is_finetuned: __snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: __snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : List[str] = SEWConfig.from_pretrained(A ) else: __snake_case : Optional[Any] = convert_config(model[0] , A ) __snake_case : Optional[int] = model[0].eval() __snake_case : Tuple = True if config.feat_extract_norm == 'layer' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) if is_finetuned: if dict_path: __snake_case : 
Union[str, Any] = Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : str = target_dict.pad_index __snake_case : Union[str, Any] = target_dict.bos_index __snake_case : List[str] = target_dict.pad_index __snake_case : Tuple = target_dict.bos_index __snake_case : List[str] = target_dict.eos_index __snake_case : Union[str, Any] = len(target_dict.symbols ) __snake_case : int = os.path.join(A , 'vocab.json' ) if not os.path.isdir(A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) ) return os.makedirs(A , exist_ok=A ) with open(A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , A ) __snake_case : List[str] = WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) __snake_case : Optional[int] = SEWForCTC(A ) else: __snake_case : Any = SEWModel(A ) feature_extractor.save_pretrained(A ) recursively_load_weights(A , A , A ) hf_model.save_pretrained(A ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __A = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
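A hypothetical Python invocation of the converter above (paths are placeholders; fairseq and the sew_asapp modules imported at the top must be installed). The positional arguments mirror the CLI flags in the __main__ block just above:

convert_sew_checkpoint(
    "sew_checkpoint.pt",   # --checkpoint_path (placeholder)
    "sew-dump-folder",     # --pytorch_dump_folder_path (placeholder)
    None,                  # --config_path: derive the config from the fairseq checkpoint
    None,                  # --dict_path: only needed for fine-tuned (CTC) checkpoints
    False,                 # --is_finetuned
)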
720
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
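A short sketch of what the lazy module wiring above enables for callers (assumes torch and timm are installed; the backbone keyword below is assumed from the TimmBackboneConfig API):

from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50")  # selects the underlying timm model (assumed kwarg)
print(config.backbone)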
61
0
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) @dataclass class a_ : _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Evaluation language. 
Also train language if `train_language` is set to None."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Train language if it is different from the evaluation language."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) _snake_case = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _snake_case = field( default=UpperCamelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" __snake_case : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __snake_case : int = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_xnli' , A ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __snake_case : Any = training_args.get_process_log_level() logger.setLevel(A ) datasets.utils.logging.set_verbosity(A ) transformers.utils.logging.set_verbosity(A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __snake_case : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: __snake_case : Any = load_dataset( 'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: __snake_case : int = load_dataset( 'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : Optional[Any] = train_dataset.features['label'].names if training_args.do_eval: __snake_case : Any = load_dataset( 'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : Any = eval_dataset.features['label'].names if training_args.do_predict: __snake_case : Any = load_dataset( 'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : str = predict_dataset.features['label'].names # Labels __snake_case : Dict = len(A ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel={str(A ): label for i, label in enumerate(A )} , labelaid={label: i for i, label in enumerate(A )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: __snake_case : List[Any] = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __snake_case : int = False def preprocess_function(A : List[str] ): # Tokenize the texts return tokenizer( examples['premise'] , examples['hypothesis'] , padding=A , max_length=data_args.max_seq_length , truncation=A , ) if training_args.do_train: if data_args.max_train_samples is not None: __snake_case : Tuple = min(len(A ) , data_args.max_train_samples ) __snake_case : Dict = train_dataset.select(range(A ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): __snake_case : int = train_dataset.map( A , batched=A , 
load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , ) # Log a few random samples from the training set: for index in random.sample(range(len(A ) ) , 3 ): logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" ) if training_args.do_eval: if data_args.max_eval_samples is not None: __snake_case : Optional[int] = min(len(A ) , data_args.max_eval_samples ) __snake_case : Optional[int] = eval_dataset.select(range(A ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): __snake_case : Dict = eval_dataset.map( A , batched=A , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , ) if training_args.do_predict: if data_args.max_predict_samples is not None: __snake_case : Dict = min(len(A ) , data_args.max_predict_samples ) __snake_case : Optional[Any] = predict_dataset.select(range(A ) ) with training_args.main_process_first(desc='prediction dataset map pre-processing' ): __snake_case : Union[str, Any] = predict_dataset.map( A , batched=A , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , ) # Get the metric function __snake_case : Optional[int] = evaluate.load('xnli' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(A : EvalPrediction ): __snake_case : List[str] = p.predictions[0] if isinstance(p.predictions , A ) else p.predictions __snake_case : Optional[Any] = np.argmax(A , axis=1 ) return metric.compute(predictions=A , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: __snake_case : int = default_data_collator elif training_args.fpaa: __snake_case : Any = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) else: __snake_case : Tuple = None # Initialize our Trainer __snake_case : Dict = Trainer( model=A , args=A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=A , tokenizer=A , data_collator=A , ) # Training if training_args.do_train: __snake_case : Dict = None if training_args.resume_from_checkpoint is not None: __snake_case : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: __snake_case : int = last_checkpoint __snake_case : List[str] = trainer.train(resume_from_checkpoint=A ) __snake_case : Union[str, Any] = train_result.metrics __snake_case : Any = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A ) ) __snake_case : List[Any] = min(A , len(A ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , A ) trainer.save_metrics('train' , A ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate(eval_dataset=A ) __snake_case : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A ) __snake_case : Optional[Any] = min(A , len(A ) ) trainer.log_metrics('eval' , A ) trainer.save_metrics('eval' , A ) # Prediction if training_args.do_predict: logger.info('*** Predict ***' ) __snake_case : str = trainer.predict(A , metric_key_prefix='predict' ) __snake_case : int = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(A ) ) __snake_case : Union[str, Any] = min(A , len(A ) ) trainer.log_metrics('predict' , A ) trainer.save_metrics('predict' , A ) __snake_case : Union[str, Any] = np.argmax(A , axis=1 ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: writer.write('index\tprediction\n' ) for index, item in enumerate(A ): __snake_case : List[Any] = label_list[item] writer.write(F"""{index}\t{item}\n""" ) if __name__ == "__main__": main()
721
'''simple docstring'''


def factorial(num: int) -> int:
    """simple docstring"""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """simple docstring"""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """simple docstring"""
    fact = factorial(num)
    result = split_and_add(fact)
    return result


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
61
0
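# Illustrative cross-check for the factorial digit-sum solution above, written
# against the standard library only (assumes Python 3; not part of the dataset row).
import math

def digit_sum_of_factorial(n: int) -> int:
    # Sum the decimal digits of n!; e.g. 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 = 27.
    return sum(int(digit) for digit in str(math.factorial(n)))

assert digit_sum_of_factorial(10) == 27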
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
700
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class a_ ( unittest.TestCase ): def __init__(self , __a , __a=7 , __a=3 , __a=1_8 , __a=3_0 , __a=4_0_0 , __a=True , __a=None , __a=True , __a=None , __a=True , __a=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __a=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __a=True , ) -> List[Any]: """simple docstring""" __snake_case : Tuple = size if size is not None else {'height': 2_2_4, 'width': 2_2_4} __snake_case : Any = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} __snake_case : Optional[int] = parent __snake_case : Dict = batch_size __snake_case : str = num_channels __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = min_resolution __snake_case : Tuple = max_resolution __snake_case : Optional[int] = do_resize __snake_case : Optional[int] = size __snake_case : Union[str, Any] = do_center_crop __snake_case : List[Any] = crop_size __snake_case : int = do_normalize __snake_case : Optional[Any] = image_mean __snake_case : str = image_std __snake_case : Optional[Any] = do_convert_rgb def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def SCREAMING_SNAKE_CASE__ (self , __a=False , __a=False , __a=False) -> List[str]: """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __snake_case : Optional[int] = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: __snake_case : Dict = [] for i in range(self.batch_size): __snake_case ,__snake_case : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __snake_case : int = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs] if torchify: __snake_case : List[Any] = [torch.from_numpy(__a) for x in image_inputs] return image_inputs @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__a) @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 
'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4}) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8}) __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4) self.assertEqual(image_processor.size , {'shortest_edge': 4_2}) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4}) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : List[Any] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Union[str, Any] = 
image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__a) __snake_case : List[Any] = 3 @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
61
0
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _SCREAMING_SNAKE_CASE ( A : Any ) -> int: """simple docstring""" __snake_case : int = image.size __snake_case : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __snake_case : Dict = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) __snake_case : int = np.array(A ).astype(np.floataa ) / 255.0 __snake_case : Dict = image[None].transpose(0 , 3 , 1 , 2 ) __snake_case : Optional[int] = torch.from_numpy(A ) return 2.0 * image - 1.0 class a_ ( UpperCamelCase_ ): def __init__(self , __a , __a , __a , ) -> Any: """simple docstring""" super().__init__() self.register_modules(vqvae=__a , unet=__a , scheduler=__a) @torch.no_grad() def __call__(self , __a = None , __a = 1 , __a = 1_0_0 , __a = 0.0 , __a = None , __a = "pil" , __a = True , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" if isinstance(__a , PIL.Image.Image): __snake_case : Optional[int] = 1 elif isinstance(__a , torch.Tensor): __snake_case : int = image.shape[0] else: raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__a)}""") if isinstance(__a , PIL.Image.Image): __snake_case : Any = preprocess(__a) __snake_case : List[str] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __snake_case : List[str] = (batch_size, self.unet.config.in_channels // 2, height, width) __snake_case : Optional[Any] = next(self.unet.parameters()).dtype __snake_case : Tuple = randn_tensor(__a , generator=__a , device=self.device , dtype=__a) __snake_case : Optional[int] = image.to(device=self.device , dtype=__a) # set timesteps and move to the correct device self.scheduler.set_timesteps(__a , device=self.device) __snake_case : Optional[int] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __snake_case : str = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case : List[Any] = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) __snake_case : Optional[Any] = {} if accepts_eta: __snake_case : List[str] = eta for t in self.progress_bar(__a): # concat latents and low resolution image in the channel dimension. 
__snake_case : Optional[Any] = torch.cat([latents, image] , dim=1) __snake_case : Any = self.scheduler.scale_model_input(__a , __a) # predict the noise residual __snake_case : Optional[Any] = self.unet(__a , __a).sample # compute the previous noisy sample x_t -> x_t-1 __snake_case : List[str] = self.scheduler.step(__a , __a , __a , **__a).prev_sample # decode the image latents with the VQVAE __snake_case : Tuple = self.vqvae.decode(__a).sample __snake_case : Any = torch.clamp(__a , -1.0 , 1.0) __snake_case : Dict = image / 2 + 0.5 __snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __snake_case : Optional[int] = self.numpy_to_pil(__a) if not return_dict: return (image,) return ImagePipelineOutput(images=__a)
701
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class a_ ( UpperCamelCase_ ): _snake_case = """vit_msn""" def __init__(self , __a=7_6_8 , __a=1_2 , __a=1_2 , __a=3_0_7_2 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-06 , __a=2_2_4 , __a=1_6 , __a=3 , __a=True , **__a , ) -> Any: """simple docstring""" super().__init__(**__a) __snake_case : List[str] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : Optional[Any] = num_attention_heads __snake_case : str = intermediate_size __snake_case : List[str] = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : List[str] = initializer_range __snake_case : Optional[int] = layer_norm_eps __snake_case : Dict = image_size __snake_case : int = patch_size __snake_case : Dict = num_channels __snake_case : Tuple = qkv_bias
61
0
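# A minimal, self-contained sketch of the image preprocessing used by the
# super-resolution pipeline above: snap width/height down to a multiple of 32,
# rescale pixels to [-1, 1], and reorder to NCHW. The helper name is illustrative,
# and newer Pillow releases may prefer Image.Resampling.LANCZOS for the filter.
import numpy as np
import PIL.Image
import torch

def preprocess_to_latent_input(image: PIL.Image.Image) -> torch.Tensor:
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))      # resize to an integer multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    arr = np.array(image).astype(np.float32) / 255.0
    arr = arr[None].transpose(0, 3, 1, 2)    # HWC -> NCHW with a batch axis
    return 2.0 * torch.from_numpy(arr) - 1.0  # map [0, 1] -> [-1, 1]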
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''', '''Salesforce/blip-vqa-capfit-large''': ( '''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-base''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-large''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json''' ), '''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''', '''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''', '''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''', '''Salesforce/blip-itm-large-flikr''': ( '''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json''' ), } class a_ ( UpperCamelCase_ ): _snake_case = """blip_text_model""" def __init__(self , __a=3_0_5_2_4 , __a=7_6_8 , __a=7_6_8 , __a=3_0_7_2 , __a=7_6_8 , __a=1_2 , __a=8 , __a=5_1_2 , __a="gelu" , __a=1E-12 , __a=0.0 , __a=0.0 , __a=0.02 , __a=3_0_5_2_2 , __a=2 , __a=0 , __a=1_0_2 , __a=True , __a=True , **__a , ) -> Tuple: """simple docstring""" super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , ) __snake_case : int = vocab_size __snake_case : Tuple = hidden_size __snake_case : Optional[Any] = encoder_hidden_size __snake_case : Tuple = intermediate_size __snake_case : Any = projection_dim __snake_case : Any = hidden_dropout_prob __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Any = max_position_embeddings __snake_case : Any = layer_norm_eps __snake_case : str = hidden_act __snake_case : str = initializer_range __snake_case : int = attention_probs_dropout_prob __snake_case : Any = is_decoder __snake_case : str = use_cache @classmethod def SCREAMING_SNAKE_CASE__ (cls , __a , **__a) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__a) __snake_case : List[str] = cls.get_config_dict(__a , **__a) # get the text config dict if we are loading from BlipConfig if config_dict.get('model_type') == "blip": __snake_case : str = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(__a , **__a) class a_ ( UpperCamelCase_ ): _snake_case = """blip_vision_model""" def __init__(self , __a=7_6_8 , __a=3_0_7_2 , __a=5_1_2 , __a=1_2 , __a=1_2 , __a=3_8_4 , __a=1_6 , __a="gelu" , __a=1E-5 , __a=0.0 , __a=1E-10 , **__a , ) -> int: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = hidden_size __snake_case : Any = intermediate_size __snake_case : int = projection_dim __snake_case : List[Any] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : List[str] = patch_size __snake_case : Any = image_size __snake_case : Optional[Any] = initializer_range __snake_case : List[str] = attention_dropout __snake_case : List[Any] = layer_norm_eps __snake_case : Optional[int] = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ (cls , __a , **__a) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__a) __snake_case : Dict = cls.get_config_dict(__a , **__a) # get the vision config dict if we are loading from BlipConfig if config_dict.get('model_type') == "blip": __snake_case : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(__a , **__a) class a_ ( UpperCamelCase_ ): _snake_case = """blip""" _snake_case = True def __init__(self , __a=None , __a=None , __a=5_1_2 , __a=2.6_592 , __a=2_5_6 , **__a , ) -> List[str]: """simple docstring""" super().__init__(**__a) if text_config is None: __snake_case : str = {} logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.') if vision_config is None: __snake_case : Dict = {} logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.') __snake_case : Union[str, Any] = BlipTextConfig(**__a) __snake_case : List[str] = BlipVisionConfig(**__a) __snake_case : List[Any] = self.vision_config.hidden_size __snake_case : Union[str, Any] = projection_dim __snake_case : Any = logit_scale_init_value __snake_case : Tuple = 1.0 __snake_case : Optional[int] = 0.02 __snake_case : Union[str, Any] = image_text_hidden_size @classmethod def SCREAMING_SNAKE_CASE__ (cls , __a , __a , **__a) -> Any: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = copy.deepcopy(self.__dict__) __snake_case : Tuple = self.text_config.to_dict() __snake_case : List[str] = self.vision_config.to_dict() __snake_case : str = self.__class__.model_type return output
702
'''simple docstring'''


def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    npv = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(npv, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
61
0
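# Worked example for the present-value helper above: each cash flow i is divided
# by (1 + discount_rate) ** i and the results are summed. The numbers below are
# illustrative, not taken from the document.
discount_rate, cash_flows = 0.10, [-100.0, 50.0, 60.0, 70.0]
npv = sum(cf / (1 + discount_rate) ** i for i, cf in enumerate(cash_flows))
assert round(npv, 2) == 47.63  # -100 + 50/1.1 + 60/1.21 + 70/1.331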
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride __snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: 
"""simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 
'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check 
that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, 
globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
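# Generic sketch of the lazy-import idea behind the _import_structure mapping
# above, using PEP 562 module-level __getattr__. This is an illustration meant
# for a package __init__.py, not transformers' actual _LazyModule implementation.
import importlib

_import_structure = {'configuration_distilbert': ['DistilBertConfig']}

def __getattr__(name):
    # Resolve the submodule lazily the first time one of its symbols is requested.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f'.{module_name}', __name__)
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')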
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class a_ ( UpperCamelCase_ ): _snake_case = """glpn""" def __init__(self , __a=3 , __a=4 , __a=[2, 2, 2, 2] , __a=[8, 4, 2, 1] , __a=[3_2, 6_4, 1_6_0, 2_5_6] , __a=[7, 3, 3, 3] , __a=[4, 2, 2, 2] , __a=[1, 2, 5, 8] , __a=[4, 4, 4, 4] , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=0.1 , __a=1E-6 , __a=6_4 , __a=1_0 , __a=-1 , **__a , ) -> List[str]: """simple docstring""" super().__init__(**__a) __snake_case : int = num_channels __snake_case : int = num_encoder_blocks __snake_case : Union[str, Any] = depths __snake_case : Optional[Any] = sr_ratios __snake_case : Optional[Any] = hidden_sizes __snake_case : Tuple = patch_sizes __snake_case : str = strides __snake_case : List[str] = mlp_ratios __snake_case : Tuple = num_attention_heads __snake_case : Optional[int] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : int = initializer_range __snake_case : Optional[Any] = drop_path_rate __snake_case : Optional[int] = layer_norm_eps __snake_case : List[Any] = decoder_hidden_size __snake_case : Tuple = max_depth __snake_case : Optional[int] = head_in_index
704
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main __snake_case : Any = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A , id=A )
61
0
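# Minimal sketch of the conftest.py trick above: insert the repository's src/
# directory near the front of sys.path so tests import the in-tree package
# rather than an installed copy (the exact directory layout is illustrative).
import sys
from os.path import abspath, dirname, join

git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)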
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a_ ( UpperCamelCase_ ): _snake_case = (KDPMaDiscreteScheduler,) _snake_case = 10 def SCREAMING_SNAKE_CASE__ (self , **__a) -> List[str]: """simple docstring""" __snake_case : Any = { 'num_train_timesteps': 1_1_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**__a) return config def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__a) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02]): self.check_over_configs(beta_start=__a , beta_end=__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.scheduler_classes[0] __snake_case : Optional[int] = self.get_scheduler_config(prediction_type='v_prediction') __snake_case : str = scheduler_class(**__a) scheduler.set_timesteps(self.num_inference_steps) __snake_case : str = self.dummy_model() __snake_case : Any = self.dummy_sample_deter * scheduler.init_noise_sigma __snake_case : Dict = sample.to(__a) for i, t in enumerate(scheduler.timesteps): __snake_case : Any = scheduler.scale_model_input(__a , __a) __snake_case : str = model(__a , __a) __snake_case : int = scheduler.step(__a , __a , __a) __snake_case : Tuple = output.prev_sample __snake_case : Optional[Any] = torch.sum(torch.abs(__a)) __snake_case : List[str] = torch.mean(torch.abs(__a)) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_934E-07) < 1E-2 assert abs(result_mean.item() - 6.1_112E-10) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.693_428_650_170_972E-07) < 1E-2 assert abs(result_mean.item() - 0.0_002) < 1E-3 def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" if torch_device == "mps": return __snake_case : str = self.scheduler_classes[0] __snake_case : int = self.get_scheduler_config() __snake_case : Optional[int] = scheduler_class(**__a) scheduler.set_timesteps(self.num_inference_steps) __snake_case : List[Any] = self.dummy_model() __snake_case : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma __snake_case : Tuple = sample.to(__a) for i, t in enumerate(scheduler.timesteps): __snake_case : Tuple = scheduler.scale_model_input(__a , __a) __snake_case : Dict = model(__a , __a) __snake_case : Dict = scheduler.step(__a , __a , __a) __snake_case : Optional[int] = output.prev_sample __snake_case : str = torch.sum(torch.abs(__a)) __snake_case : Tuple = torch.mean(torch.abs(__a)) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125) < 1E-2 assert abs(result_mean.item() - 0.0_266) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125) < 1E-2 assert abs(result_mean.item() - 0.0_266) < 1E-3 def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" if torch_device == "mps": return __snake_case : Dict = self.scheduler_classes[0] __snake_case : str = self.get_scheduler_config() __snake_case : Dict 
= scheduler_class(**__a) scheduler.set_timesteps(self.num_inference_steps , device=__a) __snake_case : str = self.dummy_model() __snake_case : Tuple = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma for t in scheduler.timesteps: __snake_case : Optional[Any] = scheduler.scale_model_input(__a , __a) __snake_case : str = model(__a , __a) __snake_case : str = scheduler.step(__a , __a , __a) __snake_case : Optional[Any] = output.prev_sample __snake_case : int = torch.sum(torch.abs(__a)) __snake_case : Dict = torch.mean(torch.abs(__a)) if str(__a).startswith('cpu'): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125) < 1E-2 assert abs(result_mean.item() - 0.0_266) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125) < 1E-2 assert abs(result_mean.item() - 0.0_266) < 1E-3
705
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a_ ( unittest.TestCase , UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = load_tool('text-to-speech') self.tool.setup() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Dict = self.tool('hey') __snake_case : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = self.tool('hey') __snake_case : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
706
'''simple docstring''' from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> int: """simple docstring""" if not is_accelerate_available(): return method __snake_case : Optional[Any] = version.parse(accelerate.__version__ ).base_version if version.parse(A ) < version.parse('0.17.0' ): return method def wrapper(self : Optional[Any] , *A : Optional[Any] , **A : Optional[int] ): if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ): self._hf_hook.pre_forward(self ) return method(self , *A , **A ) return wrapper
61
0
'''simple docstring''' from math import pi, sqrt, tan def _SCREAMING_SNAKE_CASE ( A : float ) -> float: """simple docstring""" if side_length < 0: raise ValueError('surface_area_cube() only accepts non-negative values' ) return 6 * side_length**2 def _SCREAMING_SNAKE_CASE ( A : float , A : float , A : float ) -> float: """simple docstring""" if length < 0 or breadth < 0 or height < 0: raise ValueError('surface_area_cuboid() only accepts non-negative values' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def _SCREAMING_SNAKE_CASE ( A : float ) -> float: """simple docstring""" if radius < 0: raise ValueError('surface_area_sphere() only accepts non-negative values' ) return 4 * pi * radius**2 def _SCREAMING_SNAKE_CASE ( A : float ) -> float: """simple docstring""" if radius < 0: raise ValueError('surface_area_hemisphere() only accepts non-negative values' ) return 3 * pi * radius**2 def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if radius < 0 or height < 0: raise ValueError('surface_area_cone() only accepts non-negative values' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def _SCREAMING_SNAKE_CASE ( A : float , A : float , A : float ) -> float: """simple docstring""" if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( 'surface_area_conical_frustum() only accepts non-negative values' ) __snake_case : Tuple = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if radius < 0 or height < 0: raise ValueError('surface_area_cylinder() only accepts non-negative values' ) return 2 * pi * radius * (height + radius) def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if torus_radius < 0 or tube_radius < 0: raise ValueError('surface_area_torus() only accepts non-negative values' ) if torus_radius < tube_radius: raise ValueError( 'surface_area_torus() does not support spindle or self intersecting tori' ) return 4 * pow(A , 2 ) * torus_radius * tube_radius def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if length < 0 or width < 0: raise ValueError('area_rectangle() only accepts non-negative values' ) return length * width def _SCREAMING_SNAKE_CASE ( A : float ) -> float: """simple docstring""" if side_length < 0: raise ValueError('area_square() only accepts non-negative values' ) return side_length**2 def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if base < 0 or height < 0: raise ValueError('area_triangle() only accepts non-negative values' ) return (base * height) / 2 def _SCREAMING_SNAKE_CASE ( A : float , A : float , A : float ) -> float: """simple docstring""" if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('area_triangle_three_sides() only accepts non-negative values' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('Given three sides do not form a triangle' ) __snake_case : List[Any] = (sidea + sidea + sidea) / 2 __snake_case : int = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if base < 0 or height < 0: raise ValueError('area_parallelogram() only accepts non-negative values' ) return base * height def _SCREAMING_SNAKE_CASE ( A : 
float , A : float , A : float ) -> float: """simple docstring""" if basea < 0 or basea < 0 or height < 0: raise ValueError('area_trapezium() only accepts non-negative values' ) return 1 / 2 * (basea + basea) * height def _SCREAMING_SNAKE_CASE ( A : float ) -> float: """simple docstring""" if radius < 0: raise ValueError('area_circle() only accepts non-negative values' ) return pi * radius**2 def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if radius_x < 0 or radius_y < 0: raise ValueError('area_ellipse() only accepts non-negative values' ) return pi * radius_x * radius_y def _SCREAMING_SNAKE_CASE ( A : float , A : float ) -> float: """simple docstring""" if diagonal_a < 0 or diagonal_a < 0: raise ValueError('area_rhombus() only accepts non-negative values' ) return 1 / 2 * diagonal_a * diagonal_a def _SCREAMING_SNAKE_CASE ( A : int , A : float ) -> float: """simple docstring""" if not isinstance(A , A ) or sides < 3: raise ValueError( 'area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides' ) elif length < 0: raise ValueError( 'area_reg_polygon() only accepts non-negative values as \ length of a side' ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('''[DEMO] Areas of various geometric shapes: \n''') print(f'''Rectangle: {area_rectangle(1_0, 2_0) = }''') print(f'''Square: {area_square(1_0) = }''') print(f'''Triangle: {area_triangle(1_0, 1_0) = }''') print(f'''Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }''') print(f'''Parallelogram: {area_parallelogram(1_0, 2_0) = }''') print(f'''Rhombus: {area_rhombus(1_0, 2_0) = }''') print(f'''Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }''') print(f'''Circle: {area_circle(2_0) = }''') print(f'''Ellipse: {area_ellipse(1_0, 2_0) = }''') print('''\nSurface Areas of various geometric shapes: \n''') print(f'''Cube: {surface_area_cube(2_0) = }''') print(f'''Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }''') print(f'''Sphere: {surface_area_sphere(2_0) = }''') print(f'''Hemisphere: {surface_area_hemisphere(2_0) = }''') print(f'''Cone: {surface_area_cone(1_0, 2_0) = }''') print(f'''Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }''') print(f'''Cylinder: {surface_area_cylinder(1_0, 2_0) = }''') print(f'''Torus: {surface_area_torus(2_0, 1_0) = }''') print(f'''Equilateral Triangle: {area_reg_polygon(3, 1_0) = }''') print(f'''Square: {area_reg_polygon(4, 1_0) = }''') print(f'''Regular Pentagon: {area_reg_polygon(5, 1_0) = }''')
707
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a_ ( unittest.TestCase , UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = load_tool('text-to-speech') self.tool.setup() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Dict = self.tool('hey') __snake_case : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = self.tool('hey') __snake_case : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
61
0
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = BlenderbotSmallTokenizer _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" super().setUp() __snake_case : List[Any] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] __snake_case : Optional[Any] = dict(zip(__a , range(len(__a)))) __snake_case : List[str] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] __snake_case : Optional[int] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} __snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(__a) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(__a)) def SCREAMING_SNAKE_CASE__ (self , **__a) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__a) def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" __snake_case : Optional[int] = 'adapt act apte' __snake_case : str = 'adapt act apte' return input_text, output_text def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[Any] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) __snake_case : Dict = 'adapt act apte' __snake_case : List[str] = ['adapt', 'act', 'ap@@', 'te'] __snake_case : int = tokenizer.tokenize(__a) self.assertListEqual(__a , __a) __snake_case : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] __snake_case : str = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , __a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Dict = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M') assert tok('sam').input_ids == [1_3_8_4] __snake_case : int = 'I am a small frog.' __snake_case : Optional[Any] = tok([src_text] , padding=__a , truncation=__a)['input_ids'] __snake_case : int = tok.batch_decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a)[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M') __snake_case : str = 'I am a small frog .' __snake_case : Tuple = '.' __snake_case : int = tok(__a)['input_ids'] __snake_case : Optional[Any] = tok(__a)['input_ids'] assert encoded[-1] == encoded_dot[0]
708
'''simple docstring''' import math class a_ : def __init__(self , __a=0) -> Any: # a graph with Node 0,1,...,N-1 """simple docstring""" __snake_case : List[str] = n __snake_case : Tuple = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # adjacency matrix for weight __snake_case : Union[str, Any] = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = w def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): __snake_case : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" return self.dp[u][v] if __name__ == "__main__": __A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
61
0
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion''' ) __A = None __A = { '''7B''': 1_1_0_0_8, '''13B''': 1_3_8_2_4, '''30B''': 1_7_9_2_0, '''65B''': 2_2_0_1_6, '''70B''': 2_8_6_7_2, } __A = { '''7B''': 1, '''7Bf''': 1, '''13B''': 2, '''13Bf''': 2, '''30B''': 4, '''65B''': 8, '''70B''': 8, '''70Bf''': 8, } def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Tuple=1 , A : Union[str, Any]=2_56 ) -> int: """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _SCREAMING_SNAKE_CASE ( A : int ) -> List[str]: """simple docstring""" with open(A , 'r' ) as f: return json.load(A ) def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Any ) -> str: """simple docstring""" with open(A , 'w' ) as f: json.dump(A , A ) def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Union[str, Any] , A : Union[str, Any] , A : Optional[int]=True ) -> List[str]: """simple docstring""" os.makedirs(A , exist_ok=A ) __snake_case : str = os.path.join(A , 'tmp' ) os.makedirs(A , exist_ok=A ) __snake_case : int = read_json(os.path.join(A , 'params.json' ) ) __snake_case : int = NUM_SHARDS[model_size] __snake_case : Any = params['n_layers'] __snake_case : Any = params['n_heads'] __snake_case : int = n_heads // num_shards __snake_case : Optional[int] = params['dim'] __snake_case : str = dim // n_heads __snake_case : Dict = 10000.0 __snake_case : List[Any] = 1.0 / (base ** (torch.arange(0 , A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: __snake_case : Tuple = params['n_kv_heads'] # for GQA / MQA __snake_case : Any = n_heads_per_shard // num_key_value_heads __snake_case : Any = dim // num_key_value_heads else: # compatibility with other checkpoints __snake_case : Any = n_heads __snake_case : Union[str, Any] = n_heads_per_shard __snake_case : Union[str, Any] = dim # permute for sliced rotary def permute(A : Tuple , A : List[str]=n_heads , A : Dict=dim , A : Union[str, Any]=dim ): return w.view(A , dima // n_heads // 2 , 2 , A ).transpose(1 , 2 ).reshape(A , A ) print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
__snake_case : str = torch.load(os.path.join(A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded __snake_case : int = [ torch.load(os.path.join(A , F"""consolidated.{i:02d}.pth""" ) , map_location='cpu' ) for i in range(A ) ] __snake_case : Dict = 0 __snake_case : Any = {'weight_map': {}} for layer_i in range(A ): __snake_case : str = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded __snake_case : List[str] = { F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wq.weight"""] ), F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wk.weight"""] ), F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""], F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""], F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""], F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""], F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""], F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""], F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
__snake_case : Optional[int] = { F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.attention_norm.weight""" ].clone(), F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.ffn_norm.weight""" ].clone(), } __snake_case : Optional[int] = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) ) __snake_case : Any = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view( A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) , A , A , A , ) __snake_case : Tuple = torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view( A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) __snake_case : Tuple = torch.cat( [loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(A )] , dim=1 ) __snake_case : Union[str, Any] = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(A )] , dim=0 ) __snake_case : int = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(A )] , dim=1 ) __snake_case : Any = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(A )] , dim=0 ) __snake_case : str = inv_freq for k, v in state_dict.items(): __snake_case : Union[str, Any] = filename param_count += v.numel() torch.save(A , os.path.join(A , A ) ) __snake_case : Optional[int] = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded __snake_case : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: __snake_case : str = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(A )] , dim=0 ), } for k, v in state_dict.items(): __snake_case : Union[str, Any] = filename param_count += v.numel() torch.save(A , os.path.join(A , A ) ) # Write configs __snake_case : Optional[int] = {'total_size': param_count * 2} write_json(A , os.path.join(A , 'pytorch_model.bin.index.json' ) ) __snake_case : List[str] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 __snake_case : Tuple = params['multiple_of'] if 'multiple_of' in params else 2_56 __snake_case : Optional[Any] = LlamaConfig( hidden_size=A , intermediate_size=compute_intermediate_size(A , A , A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=A , ) config.save_pretrained(A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) __snake_case : List[str] = LlamaForCausalLM.from_pretrained(A , torch_dtype=torch.floataa , low_cpu_mem_usage=A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' 
) model.save_pretrained(A , safe_serialization=A ) shutil.rmtree(A ) def _SCREAMING_SNAKE_CASE ( A : Dict , A : Dict ) -> Dict: """simple docstring""" __snake_case : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" ) __snake_case : str = tokenizer_class(A ) tokenizer.save_pretrained(A ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=A , help='Whether or not to save using `safetensors`.' ) __snake_case : Tuple = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) __snake_case : Optional[int] = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , A ) if __name__ == "__main__": main()
709
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
0
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _SCREAMING_SNAKE_CASE ( A : str ) -> Tuple: """simple docstring""" __snake_case : Dict = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : Optional[int] = emb.weight.shape __snake_case : str = nn.Linear(A , A , bias=A ) __snake_case : int = emb.weight.data return lin_layer def _SCREAMING_SNAKE_CASE ( A : int , A : Dict=None ) -> List[str]: """simple docstring""" __snake_case : Optional[Any] = {} for old_key in state_dict.keys(): __snake_case : Optional[int] = old_key if "moe_layer.experts." in key: if expert_idx is not None: __snake_case : Any = key.replace('moe_layer.experts.0' , F"""ffn.experts.expert_{expert_idx}""" ) else: __snake_case : Union[str, Any] = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' ) if "gate" in key: __snake_case : List[Any] = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' ) if "fc2" and "experts" not in key: __snake_case : List[Any] = key.replace('.fc2.' , '.ffn.fc2.' ) if "fc1" and "experts" not in key: __snake_case : Dict = key.replace('.fc1.' , '.ffn.fc1.' ) if ".encoder_attn." in key: __snake_case : Union[str, Any] = key.replace('.encoder_attn.' , '.cross_attention.' ) if "encoder_attn_layer_norm" in key: __snake_case : Tuple = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' ) if "final_layer_norm" in key: __snake_case : Tuple = key.replace('final_layer_norm' , 'ff_layer_norm' ) __snake_case : Tuple = state_dict[old_key] return new_dict def _SCREAMING_SNAKE_CASE ( A : Optional[Any] , A : Any , A : List[str] , A : str , A : str = WEIGHTS_NAME ) -> Dict: """simple docstring""" __snake_case : Any = [] __snake_case : Optional[int] = 0 os.makedirs(A , exist_ok=A ) for expert in range(A ): __snake_case : int = switch_checkpoint_path + F"""-rank-{expert}.pt""" if os.path.isfile(A ): __snake_case : List[str] = torch.load(A )['model'] remove_ignore_keys_(A ) __snake_case : Optional[Any] = rename_fairseq_keys(A , A ) __snake_case : Union[str, Any] = os.path.join( A , weights_name.replace('.bin' , F"""-{len(A )+1:05d}-of-???.bin""" ) ) torch.save(A , A ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(A )[0]].dtype ) # Add the last block __snake_case : Dict = os.path.join(A , weights_name.replace('.bin' , F"""-{len(A )+1:05d}-of-???.bin""" ) ) __snake_case : List[str] = torch.load(switch_checkpoint_path + '-shared.pt' )['model'] remove_ignore_keys_(A ) __snake_case : Optional[int] = rename_fairseq_keys(A , A ) __snake_case : Optional[int] = shared_weights['decoder.embed_tokens.weight'] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(A ) == 1: __snake_case : List[str] = os.path.join(A , A ) torch.save(A , A ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(A , A ) # Otherwise, let's build the index __snake_case : Union[str, Any] = {} for idx, shard in enumerate(A 
): __snake_case : Tuple = weights_name.replace('.bin' , F"""-{idx+1:05d}-of-{len(A ):05d}.bin""" ) __snake_case : Dict = os.path.join(A , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(A , os.path.join(A , A ) ) for key in shard: __snake_case : Optional[Any] = shard_file # Add the metadata __snake_case : Tuple = {'total_size': total_size} __snake_case : Dict = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(A , A ) , 'w' , encoding='utf-8' ) as f: __snake_case : int = json.dumps(A , indent=2 , sort_keys=A ) + '\n' f.write(A ) return metadata, index if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) __A = parser.parse_args() __A , __A = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) __A = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) __A = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
710
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
61
0
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _SCREAMING_SNAKE_CASE ( A : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = filter(lambda A : p.requires_grad , model.parameters() ) __snake_case : int = sum([np.prod(p.size() ) for p in model_parameters] ) return params __A = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : int ) -> Any: """simple docstring""" if metric == "rouge2": __snake_case : Tuple = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __snake_case : Tuple = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __snake_case : Optional[int] = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": __snake_case : Union[str, Any] = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ' function.' ) __snake_case : str = ModelCheckpoint( dirpath=A , filename=A , monitor=F"""val_{metric}""" , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Tuple ) -> Dict: """simple docstring""" return EarlyStopping( monitor=F"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=A , verbose=A , ) class a_ ( pl.Callback ): def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> str: """simple docstring""" __snake_case : int = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(__a) @rank_zero_only def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a=True) -> None: """simple docstring""" logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""") __snake_case : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']}) # Log results __snake_case : int = Path(pl_module.hparams.output_dir) if type_path == "test": __snake_case : Any = od / 'test_results.txt' __snake_case : Any = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__snake_case : Optional[Any] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt""" __snake_case : Union[str, Any] = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=__a) generations_file.parent.mkdir(exist_ok=__a) with open(__a , 'a+') as writer: for key in sorted(__a): if key in ["log", "progress_bar", "preds"]: continue __snake_case : Dict = metrics[key] if isinstance(__a , torch.Tensor): __snake_case : str = val.item() __snake_case : Tuple = F"""{key}: {val:.6f}\n""" writer.write(__a) if not save_generations: return if "preds" in metrics: __snake_case : Tuple = '\n'.join(metrics['preds']) generations_file.open('w+').write(__a) @rank_zero_only def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Dict: """simple docstring""" try: __snake_case : List[Any] = pl_module.model.model.num_parameters() except AttributeError: __snake_case : Dict = pl_module.model.num_parameters() __snake_case : Union[str, Any] = count_trainable_parameters(__a) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6}) @rank_zero_only def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path) return self._write_logs(__a , __a , 'test') @rank_zero_only def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> str: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
711
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int = 10_00 ) -> int: """simple docstring""" return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
712
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = 
align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a_ : def __init__(self , __a=2 , __a=3 , __a=6_4 , __a=None) -> List[Any]: """simple docstring""" __snake_case : List[Any] = np.random.default_rng(__a) __snake_case : Dict = length __snake_case : str = rng.normal(size=(length,)).astype(np.floataa) __snake_case : int = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.floataa) def __len__(self) -> Union[str, Any]: """simple docstring""" return self.length def __getitem__(self , __a) -> Optional[int]: """simple docstring""" return {"x": self.x[i], "y": self.y[i]} class a_ ( torch.nn.Module ): def __init__(self , __a=0 , __a=0 , __a=False) -> Optional[Any]: """simple docstring""" super().__init__() __snake_case : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3]).float()) __snake_case : List[Any] = torch.nn.Parameter(torch.tensor([2, 3]).float()) __snake_case : str = True def SCREAMING_SNAKE_CASE__ (self , __a=None) -> Optional[Any]: """simple docstring""" if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""") __snake_case : Optional[Any] = False return x * self.a[0] + self.b[0] class a_ ( torch.nn.Module ): def __init__(self , __a=0 , __a=0 , __a=False) -> List[str]: """simple docstring""" super().__init__() __snake_case : int = torch.nn.Parameter(torch.tensor(__a).float()) __snake_case : Tuple = torch.nn.Parameter(torch.tensor(__a).float()) __snake_case : List[str] = True def SCREAMING_SNAKE_CASE__ (self , __a=None) -> List[Any]: """simple docstring""" if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""") __snake_case : Dict = False return x * self.a + self.b def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : int = 16 ) -> int: """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __snake_case : List[str] = AutoTokenizer.from_pretrained('bert-base-cased' ) __snake_case : Optional[Any] = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} __snake_case : Tuple = load_dataset('csv' , data_files=A ) __snake_case : Optional[int] = datasets['train'].unique('label' ) __snake_case : Optional[Any] = {v: i for i, v in enumerate(A )} def tokenize_function(A : List[Any] ): # max_length=None => use the model max length (it's actually the default) __snake_case : Tuple = tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=A , max_length=A , padding='max_length' ) if "label" in examples: __snake_case : Tuple = [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __snake_case : List[str] = datasets.map( A , batched=A , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(A : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(A , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __snake_case : Tuple = DataLoader(tokenized_datasets['train'] , shuffle=A , collate_fn=A , batch_size=2 ) __snake_case : Union[str, Any] = DataLoader(tokenized_datasets['validation'] , shuffle=A , collate_fn=A , batch_size=1 ) return train_dataloader, eval_dataloader
713
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
61
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __A = logging.get_logger(__name__) __A = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class a_ ( UpperCamelCase_ , UpperCamelCase_ ): _snake_case = """focalnet""" def __init__(self , __a=2_2_4 , __a=4 , __a=3 , __a=9_6 , __a=False , __a=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , __a=[2, 2, 6, 2] , __a=[2, 2, 2, 2] , __a=[3, 3, 3, 3] , __a="gelu" , __a=4.0 , __a=0.0 , __a=0.1 , __a=False , __a=1E-4 , __a=False , __a=False , __a=False , __a=0.02 , __a=1E-5 , __a=3_2 , __a=None , __a=None , **__a , ) -> Any: """simple docstring""" super().__init__(**__a) __snake_case : int = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[int] = num_channels __snake_case : str = embed_dim __snake_case : Optional[Any] = use_conv_embed __snake_case : List[Any] = hidden_sizes __snake_case : Any = depths __snake_case : Tuple = focal_levels __snake_case : int = focal_windows __snake_case : Optional[Any] = hidden_act __snake_case : Optional[int] = mlp_ratio __snake_case : List[str] = hidden_dropout_prob __snake_case : Union[str, Any] = drop_path_rate __snake_case : str = use_layerscale __snake_case : List[Any] = layerscale_value __snake_case : Union[str, Any] = use_post_layernorm __snake_case : Union[str, Any] = use_post_layernorm_in_modulation __snake_case : Optional[int] = normalize_modulator __snake_case : Union[str, Any] = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[str] = encoder_stride __snake_case : List[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(self.depths) + 1)] __snake_case : List[Any] = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names)
714
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger() def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A ) else: __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A ) if hidden_sizes == 1_92: __snake_case : int = timm.create_model('levit_192' , pretrained=A ) if hidden_sizes == 2_56: __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A ) if hidden_sizes == 3_84: __snake_case : int = timm.create_model('levit_384' , pretrained=A ) from_model.eval() __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval() __snake_case : int = OrderedDict() __snake_case : Optional[Any] = from_model.state_dict() __snake_case : Tuple = list(from_model.state_dict().keys() ) __snake_case : List[str] = list(our_model.state_dict().keys() ) print(len(A ) , len(A ) ) for i in range(len(A ) ): __snake_case : Optional[int] = weights[og_keys[i]] our_model.load_state_dict(A ) __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) ) __snake_case : Union[str, Any] = from_model(A ) __snake_case : List[str] = our_model(A ).logits assert torch.allclose(A , A ), "The model logits don't match the original one." __snake_case : int = name print(A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __snake_case : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = 'imagenet-1k-id2label.json' __snake_case : Tuple = 10_00 __snake_case : Dict = (1, num_labels) __snake_case : List[str] = 'huggingface/label-files' __snake_case : Any = num_labels __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A ) __snake_case : Dict = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } __snake_case : Union[str, Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( 
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A ) return config, expected_shape if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __A = parser.parse_args() __A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
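For reference, a hedged sketch of building one of the configurations assembled above by hand; the keyword values are copied from the 'levit-128S' entry and assume transformers' LevitConfig accepts them as in the partial used above.

from transformers import LevitConfig

config = LevitConfig(
    hidden_sizes=[128, 256, 384],
    num_attention_heads=[4, 6, 8],
    depths=[2, 3, 4],
    key_dim=[16, 16, 16],
    drop_path_rate=0,
    num_labels=1000,
)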
61
0
'''simple docstring''' import sys __A = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def _SCREAMING_SNAKE_CASE ( A : str = N ) -> int: """simple docstring""" __snake_case : str = -sys.maxsize - 1 for i in range(len(A ) - 12 ): __snake_case : str = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: __snake_case : Union[str, Any] = product return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
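A self-contained toy version of the same sliding-window product search; the helper name and the 2-digit window are illustrative, not part of the original solution.

def largest_adjacent_product(digits: str, window: int = 2) -> int:
    # Slide a fixed-size window over the digit string and keep the best product.
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for ch in digits[i : i + window]:
            product *= int(ch)
        best = max(best, product)
    return best

# For "123456" the adjacent-pair products are 2, 6, 12, 20, 30, so the answer is 30.
assert largest_adjacent_product("123456") == 30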
715
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride __snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: 
"""simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) 
self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else 
outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
0
'''simple docstring''' import math import random from typing import Any from .hill_climbing import SearchProblem def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : bool = True , A : float = math.inf , A : float = -math.inf , A : float = math.inf , A : float = -math.inf , A : bool = False , A : float = 1_00 , A : float = 0.01 , A : float = 1 , ) -> Any: """simple docstring""" __snake_case : List[str] = False __snake_case : List[str] = search_prob __snake_case : Any = start_temperate __snake_case : Any = [] __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = None while not search_end: __snake_case : Dict = current_state.score() if best_state is None or current_score > best_state.score(): __snake_case : List[Any] = current_state scores.append(A ) iterations += 1 __snake_case : Tuple = None __snake_case : Optional[int] = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __snake_case : Optional[Any] = random.randint(0 , len(A ) - 1 ) # picking a random neighbor __snake_case : Union[str, Any] = neighbors.pop(A ) __snake_case : Dict = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __snake_case : Optional[Any] = change * -1 # in case we are finding minimum if change > 0: # improves the solution __snake_case : List[str] = picked_neighbor else: __snake_case : Any = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __snake_case : Union[str, Any] = picked_neighbor __snake_case : int = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __snake_case : str = True else: __snake_case : Optional[int] = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(A ) , A ) plt.xlabel('Iterations' ) plt.ylabel('Function values' ) plt.show() return best_state if __name__ == "__main__": def _SCREAMING_SNAKE_CASE ( A : Dict , A : List[str] ) -> Dict: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) __A = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa) __A = simulated_annealing( prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True ) print( '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) __A = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa) __A = simulated_annealing( prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True ) print( '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) def _SCREAMING_SNAKE_CASE ( A : str , A : List[Any] ) -> Any: """simple docstring""" return (3 * x**2) - (6 * y) __A = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) __A = simulated_annealing(prob, find_max=False, visualization=True) print( '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' f'''{local_min.score()}''' ) __A = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) __A = 
simulated_annealing(prob, find_max=True, visualization=True) print( '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' f'''{local_min.score()}''' )
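A self-contained sketch of the acceptance rule applied inside the loop above: improvements are always taken, while worse moves are taken with probability e^(change / T). The function name is illustrative.

import math
import random

def accept(change: float, temperature: float) -> bool:
    # change > 0 means the neighbor improves the score; always accept it.
    if change > 0:
        return True
    # Otherwise accept with probability e^(change / T), which shrinks as T cools.
    return random.random() < math.e ** (change / temperature)

# The same loss of 1 is almost always accepted when hot and almost never when cold.
print(math.e ** (-1 / 100))  # ~0.990
print(math.e ** (-1 / 0.1))  # ~4.5e-05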
716
'''simple docstring''' __A = {str(digit): digit**5 for digit in range(1_0)} def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" return sum( number for number in range(10_00 , 1_00_00_00 ) if number == digits_fifth_powers_sum(A ) ) if __name__ == "__main__": print(solution())
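A quick worked check of the property being summed above: 4150 qualifies, since 4^5 + 1^5 + 5^5 + 0^5 = 1024 + 1 + 3125 + 0 = 4150.

assert sum(int(d) ** 5 for d in "4150") == 4150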
61
0
'''simple docstring''' import fire from utils import calculate_rouge, save_json def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : List[str] , A : Union[str, Any]=None , **A : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : List[Any] = [x.strip() for x in open(A ).readlines()] __snake_case : Optional[Any] = [x.strip() for x in open(A ).readlines()][: len(A )] __snake_case : List[Any] = calculate_rouge(A , A , **A ) if save_path is not None: save_json(A , A , indent=A ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
717
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a_ : _snake_case = 42 _snake_case = None _snake_case = None def _SCREAMING_SNAKE_CASE ( ) -> Node | None: """simple docstring""" __snake_case : str = Node(1 ) __snake_case : Tuple = Node(2 ) __snake_case : Optional[int] = Node(3 ) __snake_case : List[str] = Node(4 ) __snake_case : List[str] = Node(5 ) return tree def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] if root is None: return output __snake_case : Optional[int] = deque([root] ) while process_queue: __snake_case : List[str] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] __snake_case : list[Sequence[Node | None]] = [] __snake_case : List[Any] = 0 __snake_case : int = height(A ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(A , A ) ) __snake_case : int = 1 else: output.append(get_nodes_from_right_to_left(A , A ) ) __snake_case : Tuple = 0 return output def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing. """simple docstring""" __snake_case : Optional[int] = make_tree() print(F"""In-order Traversal: {inorder(A )}""" ) print(F"""Pre-order Traversal: {preorder(A )}""" ) print(F"""Post-order Traversal: {postorder(A )}""" , '\n' ) print(F"""Height of Tree: {height(A )}""" , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(A ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(A ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(A , level=A ) ) print('\nZigZag order Traversal: ' ) print(zigzag(A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
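A self-contained check of the traversal orders described above on a minimal three-node tree; the tiny node class and function names are illustrative, not the ones defined in the module.

class TinyNode:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def walk_pre(node):
    return [node.data, *walk_pre(node.left), *walk_pre(node.right)] if node else []

def walk_in(node):
    return [*walk_in(node.left), node.data, *walk_in(node.right)] if node else []

def walk_post(node):
    return [*walk_post(node.left), *walk_post(node.right), node.data] if node else []

root = TinyNode(2, TinyNode(1), TinyNode(3))  # 1 and 3 hang off the root node 2
assert walk_pre(root) == [2, 1, 3]
assert walk_in(root) == [1, 2, 3]
assert walk_post(root) == [1, 3, 2]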
61
0
'''simple docstring''' import random def _SCREAMING_SNAKE_CASE ( A : Dict , A : List[str] , A : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : str = a[left_index] __snake_case : List[str] = left_index + 1 for j in range(left_index + 1 , A ): if a[j] < pivot: __snake_case : Dict = a[i], a[j] i += 1 __snake_case : Dict = a[i - 1], a[left_index] return i - 1 def _SCREAMING_SNAKE_CASE ( A : int , A : Any , A : Tuple ) -> List[str]: """simple docstring""" if left < right: __snake_case : Any = random.randint(A , right - 1 ) __snake_case : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound __snake_case : Optional[Any] = partition(A , A , A ) quick_sort_random( A , A , A ) # recursive quicksort to the left of the pivot point quick_sort_random( A , pivot_index + 1 , A ) # recursive quicksort to the right of the pivot point def _SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" __snake_case : int = input('Enter numbers separated by a comma:\n' ).strip() __snake_case : str = [int(A ) for item in user_input.split(',' )] quick_sort_random(A , 0 , len(A ) ) print(A ) if __name__ == "__main__": main()
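A compact, self-contained sketch of the same randomized-pivot idea, sorting copies rather than in place; the names are illustrative and independent of the module above.

import random

def quicksort_random(items):
    # Base case: zero or one element is already sorted.
    if len(items) <= 1:
        return items
    # Pick a random pivot and partition into three buckets around it.
    pivot = items[random.randrange(len(items))]
    smaller = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    larger = [x for x in items if x > pivot]
    return quicksort_random(smaller) + equal + quicksort_random(larger)

assert quicksort_random([3, 1, 2, 3]) == [1, 2, 3, 3]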
718
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int 
= w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] 
elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
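A quick numeric check of the 2x2 determinant formula used as the base case above; the helper is illustrative and not part of the module.

def det2(matrix):
    # ad - bc for [[a, b], [c, d]], matching the 2x2 base case above
    return matrix[0][0] * matrix[1][1] - matrix[0][1] * matrix[1][0]

assert det2([[1, 2], [3, 4]]) == -2
assert det2([[2, 0], [0, 3]]) == 6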
61
0
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __A = logging.get_logger('''transformers.models.encodec''') __A = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } __A = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } __A = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', 
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } __A = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } __A = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } __A = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __A = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __A = [] __A = [] def _SCREAMING_SNAKE_CASE ( A : Any , A : Tuple , A : Tuple , A : Optional[Any] , A : Union[str, Any] ) -> Dict: """simple docstring""" for attribute in key.split('.' ): __snake_case : str = getattr(A , A ) if weight_type is not None: __snake_case : List[Any] = getattr(A , A ).shape else: __snake_case : Tuple = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : Tuple = value elif weight_type == "weight_g": __snake_case : Optional[Any] = value elif weight_type == "weight_v": __snake_case : Any = value elif weight_type == "bias": __snake_case : Union[str, Any] = value elif weight_type == "running_mean": __snake_case : Optional[int] = value elif weight_type == "running_var": __snake_case : Dict = value elif weight_type == "num_batches_tracked": __snake_case : Optional[int] = value elif weight_type == "weight_ih_l0": __snake_case : List[str] = value elif weight_type == "weight_hh_l0": __snake_case : int = value elif weight_type == "bias_ih_l0": __snake_case : List[Any] = value elif weight_type == "bias_hh_l0": __snake_case : List[Any] = value elif weight_type == "weight_ih_l1": __snake_case : Optional[int] = value elif weight_type == "weight_hh_l1": __snake_case : Any = value elif weight_type == "bias_ih_l1": __snake_case : Tuple = value elif weight_type == "bias_hh_l1": __snake_case : Union[str, Any] = value else: __snake_case : Optional[int] = value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : List[Any] ) -> Optional[Any]: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __snake_case : str = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : List[str] , A : Any ) -> Tuple: """simple docstring""" __snake_case : List[Any] = [] if model_name == "encodec_24khz" or "encodec_32khz": __snake_case : Dict = MAPPING_24K elif model_name == "encodec_48khz": __snake_case : Optional[Any] = MAPPING_48K else: raise ValueError(F"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(A , A ): logger.info(F"""{name} was ignored""" ) continue __snake_case : Optional[Any] = False for key, mapped_key in MAPPING.items(): if "*" in key: __snake_case : Union[str, Any] = key.split('.*.' ) if prefix in name and suffix in name: __snake_case : List[str] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue __snake_case : str = True if "*" in mapped_key: __snake_case : str = name.split(A )[0].split('.' 
)[-2] __snake_case : Dict = mapped_key.replace('*' , A ) if "weight_g" in name: __snake_case : Any = 'weight_g' elif "weight_v" in name: __snake_case : Dict = 'weight_v' elif "weight_ih_l0" in name: __snake_case : List[str] = 'weight_ih_l0' elif "weight_hh_l0" in name: __snake_case : Tuple = 'weight_hh_l0' elif "bias_ih_l0" in name: __snake_case : Tuple = 'bias_ih_l0' elif "bias_hh_l0" in name: __snake_case : Optional[Any] = 'bias_hh_l0' elif "weight_ih_l1" in name: __snake_case : List[Any] = 'weight_ih_l1' elif "weight_hh_l1" in name: __snake_case : Any = 'weight_hh_l1' elif "bias_ih_l1" in name: __snake_case : str = 'bias_ih_l1' elif "bias_hh_l1" in name: __snake_case : List[Any] = 'bias_hh_l1' elif "bias" in name: __snake_case : str = 'bias' elif "weight" in name: __snake_case : str = 'weight' elif "running_mean" in name: __snake_case : Any = 'running_mean' elif "running_var" in name: __snake_case : Tuple = 'running_var' elif "num_batches_tracked" in name: __snake_case : Any = 'num_batches_tracked' else: __snake_case : Optional[Any] = None set_recursively(A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(F"""Unused weights: {unused_weights}""" ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( A : Dict , A : List[Any] , A : Dict , A : str=None , A : int=None , ) -> List[Any]: """simple docstring""" if config_path is not None: __snake_case : Any = EncodecConfig.from_pretrained(A ) else: __snake_case : Tuple = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": __snake_case : Optional[int] = [8, 5, 4, 4] __snake_case : Optional[int] = [2.2] __snake_case : Tuple = 64 __snake_case : List[str] = 3_20_00 __snake_case : Dict = 20_48 __snake_case : Any = False __snake_case : List[Any] = False __snake_case : Any = False elif model_name == "encodec_48khz": __snake_case : Tuple = [8, 5, 4, 2] __snake_case : Any = [3.0, 6.0, 12.0, 24.0] __snake_case : int = 4_80_00 __snake_case : Optional[Any] = 2 __snake_case : str = False __snake_case : Union[str, Any] = 'time_group_norm' __snake_case : List[str] = True __snake_case : Tuple = 1.0 __snake_case : Tuple = 0.01 else: raise ValueError(F"""Unknown model name: {model_name}""" ) __snake_case : List[Any] = EncodecModel(A ) __snake_case : Optional[int] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(A ) __snake_case : Union[str, Any] = torch.load(A ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights __snake_case : str = original_checkpoint['best_state'] recursively_load_weights(A , A , A ) model.save_pretrained(A ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(A ) model.push_to_hub(A ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. 
Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __A = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
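A hedged sketch of instantiating the 24 kHz architecture targeted above without running the full conversion; it assumes the default EncodecConfig matches the "config is already correct" branch for encodec_24khz, and the feature-extractor arguments mirror those used in the script.

from transformers import EncodecConfig, EncodecFeatureExtractor, EncodecModel

config = EncodecConfig()  # defaults correspond to the encodec_24khz case above
model = EncodecModel(config)
feature_extractor = EncodecFeatureExtractor(
    feature_size=config.audio_channels,
    sampling_rate=config.sampling_rate,
    chunk_length_s=config.chunk_length_s,
    overlap=config.overlap,
)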
719
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification __A = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co __A = '''main''' # Default branch name __A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) __A = '''aaaaaaa''' # This commit does not exist, so we should 404. __A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes __A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" print('Bonjour!' ) yield print('Au revoir!' ) class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers') is not None class a_ ( unittest.TestCase ): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" with ContextManagers([]): print('Transformers are awesome!') # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" with ContextManagers([context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" with ContextManagers([context_fr(), context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n') @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_tf def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , 
['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_flax def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , [])
61
0
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A = logging.getLogger(__name__) class a_ ( UpperCamelCase_ ): def __init__(self , __a=-1) -> Dict: """simple docstring""" __snake_case : List[Any] = label_idx def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> List[InputExample]: """simple docstring""" if isinstance(__a , __a): __snake_case : Dict = mode.value __snake_case : Tuple = os.path.join(__a , F"""{mode}.txt""") __snake_case : Any = 1 __snake_case : int = [] with open(__a , encoding='utf-8') as f: __snake_case : int = [] __snake_case : int = [] for line in f: if line.startswith('-DOCSTART-') or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=__a , labels=__a)) guid_index += 1 __snake_case : Optional[Any] = [] __snake_case : int = [] else: __snake_case : List[Any] = line.split(' ') words.append(splits[0]) if len(__a) > 1: labels.append(splits[self.label_idx].replace('\n' , '')) else: # Examples could have no label for mode = "test" labels.append('O') if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=__a , labels=__a)) return examples def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Any: """simple docstring""" __snake_case : List[Any] = 0 for line in test_input_reader: if line.startswith('-DOCSTART-') or line == "" or line == "\n": writer.write(__a) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: __snake_case : List[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n' writer.write(__a) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0]) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" if path: with open(__a , 'r') as f: __snake_case : Tuple = f.read().splitlines() if "O" not in labels: __snake_case : Union[str, Any] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class a_ ( UpperCamelCase_ ): def __init__(self) -> str: """simple docstring""" super().__init__(label_idx=-2) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" if path: with open(__a , 'r') as f: __snake_case : Any = f.read().splitlines() if "O" not in labels: __snake_case : List[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class a_ ( UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> List[InputExample]: """simple docstring""" if isinstance(__a , __a): __snake_case : List[Any] = mode.value __snake_case : Dict = os.path.join(__a , F"""{mode}.txt""") __snake_case : Union[str, Any] = 1 __snake_case : Union[str, Any] = [] with open(__a , encoding='utf-8') as f: for sentence in parse_incr(__a): __snake_case : Any = [] __snake_case : Dict = [] for token in sentence: words.append(token['form']) labels.append(token['upos']) assert len(__a) == len(__a) if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=__a , labels=__a)) guid_index += 1 return examples def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> List[Any]: """simple docstring""" __snake_case : int = 0 for sentence in parse_incr(__a): __snake_case : Optional[Any] = preds_list[example_id] 
__snake_case : Any = '' for token in sentence: out += F"""{token["form"]} ({token["upos"]}|{s_p.pop(0)}) """ out += "\n" writer.write(__a) example_id += 1 def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" if path: with open(__a , 'r') as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
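For context, a hedged illustration of the whitespace-separated, one-token-per-line layout the CoNLL-style reader above expects; the sentence is made up. With label_idx = -1 the NER reader takes the last column, and the chunking subclass (label_idx = -2) takes the second-to-last; sentences are separated by blank lines or -DOCSTART- markers.

# Columns: token, POS tag, chunk tag, NER tag (illustrative example only).
conll_snippet = """Alice NNP B-NP B-PER
visited VBD B-VP O
Berlin NNP B-NP B-LOC
. . O O
"""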
720
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
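A minimal sketch of using the backbone exported above, assuming TimmBackboneConfig takes the timm model name as its first argument and that the `timm` package is installed; the checkpoint name and out_indices are illustrative.

from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig("resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
backbone = TimmBackbone(config)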
61
0
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : List[Any] = 'ZinengTang/tvlt-base' __snake_case : Optional[Any] = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE__ (self , **__a) -> Optional[Any]: """simple docstring""" return TvltImageProcessor.from_pretrained(self.checkpoint , **__a) def SCREAMING_SNAKE_CASE__ (self , **__a) -> Tuple: """simple docstring""" return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__a) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.get_image_processor() __snake_case : Optional[int] = self.get_feature_extractor() __snake_case : Optional[int] = TvltProcessor(image_processor=__a , feature_extractor=__a) processor.save_pretrained(self.tmpdirname) __snake_case : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname) self.assertIsInstance(processor.feature_extractor , __a) self.assertIsInstance(processor.image_processor , __a) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.get_image_processor() __snake_case : Union[str, Any] = self.get_feature_extractor() __snake_case : Optional[Any] = TvltProcessor(image_processor=__a , feature_extractor=__a) __snake_case : Dict = np.ones([1_2_0_0_0]) __snake_case : Tuple = feature_extractor(__a , return_tensors='np') __snake_case : int = processor(audio=__a , return_tensors='np') for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Optional[int] = self.get_image_processor() __snake_case : Union[str, Any] = self.get_feature_extractor() __snake_case : Optional[Any] = TvltProcessor(image_processor=__a , feature_extractor=__a) __snake_case : List[str] = np.ones([3, 2_2_4, 2_2_4]) __snake_case : Union[str, Any] = image_processor(__a , return_tensors='np') __snake_case : Optional[Any] = processor(images=__a , return_tensors='np') for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.get_image_processor() __snake_case : Optional[Any] = self.get_feature_extractor() __snake_case : Dict = TvltProcessor(image_processor=__a , feature_extractor=__a) __snake_case : Tuple = np.ones([1_2_0_0_0]) __snake_case : Union[str, Any] = np.ones([3, 2_2_4, 2_2_4]) __snake_case : List[Any] = processor(audio=__a , images=__a) self.assertListEqual(list(inputs.keys()) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask']) # test if it raises when no input is passed with pytest.raises(__a): processor() def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : int = self.get_image_processor() __snake_case : Union[str, Any] = self.get_feature_extractor() __snake_case : List[Any] = TvltProcessor(image_processor=__a , 
feature_extractor=__a) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
721
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : str = 1 for i in range(1 , num + 1 ): fact *= i return fact def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : Union[str, Any] = 0 while number > 0: __snake_case : Dict = number % 10 sum_of_digits += last_digit __snake_case : Union[str, Any] = number // 10 # Removing the last_digit from the given number return sum_of_digits def _SCREAMING_SNAKE_CASE ( A : int = 1_00 ) -> int: """simple docstring""" __snake_case : List[Any] = factorial(A ) __snake_case : Dict = split_and_add(A ) return result if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
61
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): def __init__(self , *__a , **__a) -> None: """simple docstring""" warnings.warn( 'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PerceiverImageProcessor instead.' , __a , ) super().__init__(*__a , **__a)
700
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class a_ ( unittest.TestCase ): def __init__(self , __a , __a=7 , __a=3 , __a=1_8 , __a=3_0 , __a=4_0_0 , __a=True , __a=None , __a=True , __a=None , __a=True , __a=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __a=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __a=True , ) -> List[Any]: """simple docstring""" __snake_case : Tuple = size if size is not None else {'height': 2_2_4, 'width': 2_2_4} __snake_case : Any = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} __snake_case : Optional[int] = parent __snake_case : Dict = batch_size __snake_case : str = num_channels __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = min_resolution __snake_case : Tuple = max_resolution __snake_case : Optional[int] = do_resize __snake_case : Optional[int] = size __snake_case : Union[str, Any] = do_center_crop __snake_case : List[Any] = crop_size __snake_case : int = do_normalize __snake_case : Optional[Any] = image_mean __snake_case : str = image_std __snake_case : Optional[Any] = do_convert_rgb def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def SCREAMING_SNAKE_CASE__ (self , __a=False , __a=False , __a=False) -> List[str]: """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __snake_case : Optional[int] = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: __snake_case : Dict = [] for i in range(self.batch_size): __snake_case ,__snake_case : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __snake_case : int = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs] if torchify: __snake_case : List[Any] = [torch.from_numpy(__a) for x in image_inputs] return image_inputs @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__a) @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 
'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4}) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8}) __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4) self.assertEqual(image_processor.size , {'shortest_edge': 4_2}) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4}) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : List[Any] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Union[str, Any] = 
image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__a) __snake_case : List[Any] = 3 @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
61
0
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class a_ ( UpperCamelCase_ ): _snake_case = (DEISMultistepScheduler,) _snake_case = (("""num_inference_steps""", 25),) def SCREAMING_SNAKE_CASE__ (self , **__a) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, } config.update(**__a) return config def SCREAMING_SNAKE_CASE__ (self , __a=0 , **__a) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = dict(self.forward_default_kwargs) __snake_case : Optional[int] = kwargs.pop('num_inference_steps' , __a) __snake_case : List[Any] = self.dummy_sample __snake_case : Optional[int] = 0.1 * sample __snake_case : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __snake_case : Optional[int] = self.get_scheduler_config(**__a) __snake_case : int = scheduler_class(**__a) scheduler.set_timesteps(__a) # copy over dummy past residuals __snake_case : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a) __snake_case : Optional[int] = scheduler_class.from_pretrained(__a) new_scheduler.set_timesteps(__a) # copy over dummy past residuals __snake_case : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] __snake_case : int = sample, sample for t in range(__a , time_step + scheduler.config.solver_order + 1): __snake_case : Tuple = scheduler.step(__a , __a , __a , **__a).prev_sample __snake_case : List[str] = new_scheduler.step(__a , __a , __a , **__a).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self , __a=0 , **__a) -> Any: """simple docstring""" __snake_case : Optional[Any] = dict(self.forward_default_kwargs) __snake_case : str = kwargs.pop('num_inference_steps' , __a) __snake_case : Any = self.dummy_sample __snake_case : List[Any] = 0.1 * sample __snake_case : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __snake_case : Tuple = self.get_scheduler_config() __snake_case : Any = scheduler_class(**__a) scheduler.set_timesteps(__a) # copy over dummy past residuals (must be after setting timesteps) __snake_case : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a) __snake_case : Optional[int] = scheduler_class.from_pretrained(__a) # copy over dummy past residuals new_scheduler.set_timesteps(__a) # copy over dummy past residual (must be after setting timesteps) __snake_case : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order] __snake_case : Dict = scheduler.step(__a , __a , __a , **__a).prev_sample __snake_case : Tuple = new_scheduler.step(__a , __a , __a , **__a).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ (self , __a=None , **__a) -> Optional[Any]: """simple docstring""" if scheduler is None: __snake_case : Dict = self.scheduler_classes[0] __snake_case : Tuple = self.get_scheduler_config(**__a) 
__snake_case : Optional[int] = scheduler_class(**__a) __snake_case : Optional[Any] = self.scheduler_classes[0] __snake_case : Optional[Any] = self.get_scheduler_config(**__a) __snake_case : int = scheduler_class(**__a) __snake_case : Any = 1_0 __snake_case : Optional[Any] = self.dummy_model() __snake_case : str = self.dummy_sample_deter scheduler.set_timesteps(__a) for i, t in enumerate(scheduler.timesteps): __snake_case : Tuple = model(__a , __a) __snake_case : Optional[Any] = scheduler.step(__a , __a , __a).prev_sample return sample def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = dict(self.forward_default_kwargs) __snake_case : str = kwargs.pop('num_inference_steps' , __a) for scheduler_class in self.scheduler_classes: __snake_case : Any = self.get_scheduler_config() __snake_case : Optional[int] = scheduler_class(**__a) __snake_case : Dict = self.dummy_sample __snake_case : Dict = 0.1 * sample if num_inference_steps is not None and hasattr(__a , 'set_timesteps'): scheduler.set_timesteps(__a) elif num_inference_steps is not None and not hasattr(__a , 'set_timesteps'): __snake_case : str = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.10] __snake_case : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] __snake_case : Union[str, Any] = scheduler.timesteps[5] __snake_case : List[str] = scheduler.timesteps[6] __snake_case : Optional[Any] = scheduler.step(__a , __a , __a , **__a).prev_sample __snake_case : List[Any] = scheduler.step(__a , __a , __a , **__a).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Optional[int] = DEISMultistepScheduler(**self.get_scheduler_config()) __snake_case : Dict = self.full_loop(scheduler=__a) __snake_case : Any = torch.mean(torch.abs(__a)) assert abs(result_mean.item() - 0.23_916) < 1E-3 __snake_case : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config) __snake_case : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config) __snake_case : Tuple = UniPCMultistepScheduler.from_config(scheduler.config) __snake_case : Dict = DEISMultistepScheduler.from_config(scheduler.config) __snake_case : Dict = self.full_loop(scheduler=__a) __snake_case : List[str] = torch.mean(torch.abs(__a)) assert abs(result_mean.item() - 0.23_916) < 1E-3 def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=__a) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type='deis' , solver_order=__a , solver_type=__a , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__a , solver_type=__a , 
prediction_type=__a , algorithm_type=__a , ) __snake_case : Dict = self.full_loop( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) assert not torch.isnan(__a).any(), "Samples have nan numbers" def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__a) self.check_over_configs(lower_order_final=__a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__a , time_step=0) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.full_loop() __snake_case : Optional[Any] = torch.mean(torch.abs(__a)) assert abs(result_mean.item() - 0.23_916) < 1E-3 def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : List[str] = self.full_loop(prediction_type='v_prediction') __snake_case : Tuple = torch.mean(torch.abs(__a)) assert abs(result_mean.item() - 0.091) < 1E-3 def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = self.scheduler_classes[0] __snake_case : int = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0) __snake_case : Union[str, Any] = scheduler_class(**__a) __snake_case : List[str] = 1_0 __snake_case : Dict = self.dummy_model() __snake_case : str = self.dummy_sample_deter.half() scheduler.set_timesteps(__a) for i, t in enumerate(scheduler.timesteps): __snake_case : List[Any] = model(__a , __a) __snake_case : Dict = scheduler.step(__a , __a , __a).prev_sample assert sample.dtype == torch.floataa
701
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class a_ ( UpperCamelCase_ ): _snake_case = """vit_msn""" def __init__(self , __a=7_6_8 , __a=1_2 , __a=1_2 , __a=3_0_7_2 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-06 , __a=2_2_4 , __a=1_6 , __a=3 , __a=True , **__a , ) -> Any: """simple docstring""" super().__init__(**__a) __snake_case : List[str] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : Optional[Any] = num_attention_heads __snake_case : str = intermediate_size __snake_case : List[str] = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : List[str] = initializer_range __snake_case : Optional[int] = layer_norm_eps __snake_case : Dict = image_size __snake_case : int = patch_size __snake_case : Dict = num_channels __snake_case : Tuple = qkv_bias
61
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( A : Optional[Any] , A : Optional[Any]=False ) -> Optional[Any]: """simple docstring""" __snake_case : str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __snake_case : int = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _SCREAMING_SNAKE_CASE ( A : str , A : Dict , A : Optional[Any]=False ) -> int: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: __snake_case : Optional[int] = '' else: __snake_case : List[str] = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __snake_case : List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) __snake_case : Union[str, Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __snake_case : Any = in_proj_weight[ : config.hidden_size, : ] __snake_case : Tuple = in_proj_bias[: config.hidden_size] __snake_case : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __snake_case : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __snake_case : List[Any] = in_proj_weight[ -config.hidden_size :, : ] __snake_case : Optional[int] = in_proj_bias[-config.hidden_size :] def _SCREAMING_SNAKE_CASE ( A : str ) -> Tuple: """simple docstring""" __snake_case : Optional[int] = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE ( A : Tuple , A : str , A : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : List[str] = dct.pop(A ) __snake_case : List[str] = val def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg' __snake_case : Optional[Any] = Image.open(requests.get(A , stream=A ).raw ) return im @torch.no_grad() def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Union[str, Any] , A : Dict=True ) -> int: """simple docstring""" __snake_case : Optional[int] = ViTConfig() # patch_size if model_name[-1] == "8": __snake_case : Optional[int] = 8 # set labels if required if not base_model: __snake_case : Any = 10_00 __snake_case : Tuple = 'huggingface/label-files' __snake_case : str = 'imagenet-1k-id2label.json' __snake_case : int = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : Tuple = idalabel __snake_case : Optional[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: __snake_case : str = 3_84 __snake_case : Dict = 15_36 __snake_case : Tuple = 12 __snake_case : Dict = 6 # load original model from torch hub __snake_case : List[str] = torch.hub.load('facebookresearch/dino:main' , A ) original_model.eval() # load state_dict of original model, remove and rename some keys __snake_case : List[Any] = original_model.state_dict() if base_model: remove_classification_head_(A ) __snake_case : Any = create_rename_keys(A , base_model=A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_q_k_v(A , A , A ) # load HuggingFace model if base_model: __snake_case : Union[str, Any] = ViTModel(A , add_pooling_layer=A ).eval() else: __snake_case : Optional[Any] = ViTForImageClassification(A ).eval() model.load_state_dict(A ) # Check outputs on an image, prepared by ViTImageProcessor __snake_case : Tuple = ViTImageProcessor() __snake_case : List[Any] = image_processor(images=prepare_img() , return_tensors='pt' ) __snake_case : List[Any] = encoding['pixel_values'] __snake_case : Optional[Any] = model(A ) if base_model: __snake_case : List[Any] = original_model(A ) assert torch.allclose(A , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: __snake_case : str = original_model(A ) assert logits.shape == outputs.logits.shape assert torch.allclose(A , outputs.logits , atol=1e-3 ) Path(A ).mkdir(exist_ok=A ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) 
print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''dino_vitb16''', type=str, help='''Name of the model trained with DINO you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether to only convert the base model (no projection head weights).''', ) parser.set_defaults(base_model=True) __A = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
702
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : float , A : list[float] ) -> float: """simple docstring""" if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) __snake_case : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A ) ) return round(A , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
61
0
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __A = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False) parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''') parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''') __A = parser.parse_args() __A = '''cpu''' __A = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings''' __A = '''path-to-your-trained-model''' __A = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __A = pipe.to(device) # to channels last __A = pipe.unet.to(memory_format=torch.channels_last) __A = pipe.vae.to(memory_format=torch.channels_last) __A = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __A = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __A = torch.randn(2, 4, 6_4, 6_4) __A = torch.rand(1) * 9_9_9 __A = torch.randn(2, 7_7, 7_6_8) __A = (sample, timestep, encoder_hidden_status) try: __A = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __A = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __A = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) __A = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __A = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __A = 6_6_6 __A = torch.Generator(device).manual_seed(seed) __A = {'''generator''': generator} if args.steps is not None: __A = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __A = pipe(prompt, **generate_kwargs).images[0] # save image image.save('''generated.png''')
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, 
globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = MobileBertTokenizer _snake_case = MobileBertTokenizerFast _snake_case = True _snake_case = True _snake_case = filter_non_english _snake_case = """google/mobilebert-uncased""" def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" super().setUp() __snake_case : str = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) __snake_case : str = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" __snake_case : Any = 'UNwant\u00E9d,running' __snake_case : Union[str, Any] = 'unwanted, running' return input_text, output_text def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Dict = self.tokenizer_class(self.vocab_file) __snake_case : int = tokenizer.tokenize('UNwant\u00E9d,running') self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , [9, 6, 7, 1_2, 1_0, 1_1]) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" if not self.test_rust_tokenizer: return __snake_case : Dict = self.get_tokenizer() __snake_case : Optional[Any] = self.get_rust_tokenizer() __snake_case : int = 'UNwant\u00E9d,running' __snake_case : Tuple = tokenizer.tokenize(__a) __snake_case : int = rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) __snake_case : Union[str, Any] = tokenizer.encode(__a , add_special_tokens=__a) __snake_case : int = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) __snake_case : Optional[int] = self.get_rust_tokenizer() __snake_case : List[str] = tokenizer.encode(__a) __snake_case : int = rust_tokenizer.encode(__a) self.assertListEqual(__a , __a) # With lower casing __snake_case : Tuple = self.get_tokenizer(do_lower_case=__a) __snake_case : int = self.get_rust_tokenizer(do_lower_case=__a) __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : str = tokenizer.tokenize(__a) __snake_case : Any = rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) __snake_case : List[str] = tokenizer.encode(__a , add_special_tokens=__a) __snake_case : Dict = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) __snake_case : Optional[Any] = self.get_rust_tokenizer() __snake_case : Union[str, Any] = tokenizer.encode(__a) __snake_case : Optional[Any] = rust_tokenizer.encode(__a) self.assertListEqual(__a , __a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : str = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz']) def SCREAMING_SNAKE_CASE__ 
(self) -> List[Any]: """simple docstring""" __snake_case : Dict = BasicTokenizer(do_lower_case=__a) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['hello', '!', 'how', 'are', 'you', '?']) self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello']) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Dict = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hällo', '!', 'how', 'are', 'you', '?']) self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo']) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?']) self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello']) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?']) self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello']) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = BasicTokenizer(do_lower_case=__a) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?']) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?']) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?']) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]']) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
[UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]']) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : Optional[Any] = {} for i, token in enumerate(__a): __snake_case : str = i __snake_case : Optional[Any] = WordpieceTokenizer(vocab=__a , unk_token='[UNK]') self.assertListEqual(tokenizer.tokenize('') , []) self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing']) self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing']) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" self.assertTrue(_is_whitespace(' ')) self.assertTrue(_is_whitespace('\t')) self.assertTrue(_is_whitespace('\r')) self.assertTrue(_is_whitespace('\n')) self.assertTrue(_is_whitespace('\u00A0')) self.assertFalse(_is_whitespace('A')) self.assertFalse(_is_whitespace('-')) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" self.assertTrue(_is_control('\u0005')) self.assertFalse(_is_control('A')) self.assertFalse(_is_control(' ')) self.assertFalse(_is_control('\t')) self.assertFalse(_is_control('\r')) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" self.assertTrue(_is_punctuation('-')) self.assertTrue(_is_punctuation('$')) self.assertTrue(_is_punctuation('`')) self.assertTrue(_is_punctuation('.')) self.assertFalse(_is_punctuation('A')) self.assertFalse(_is_punctuation(' ')) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.get_tokenizer() __snake_case : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__a) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']]) self.assertListEqual( [rust_tokenizer.tokenize(__a) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']]) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : int = self.tokenizer_class.from_pretrained('google/mobilebert-uncased') __snake_case : Optional[Any] = tokenizer.encode('sequence builders' , add_special_tokens=__a) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a) __snake_case : str = tokenizer.build_inputs_with_special_tokens(__a , __a) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a) __snake_case : Dict = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __snake_case : Union[str, Any] = tokenizer_r.encode_plus( __a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , ) __snake_case : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , 'do_lower_case') else False __snake_case : Tuple = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), 'Allen'), ((2_1, 2_3), '##NL'), ((2_3, 2_4), '##P'), 
((2_5, 3_3), 'sentence'), ((3_3, 3_4), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), 'allen'), ((2_1, 2_3), '##nl'), ((2_3, 2_4), '##p'), ((2_5, 3_3), 'sentence'), ((3_3, 3_4), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'])) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping']) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = ['的', '人', '有'] __snake_case : List[str] = ''.join(__a) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __snake_case : Any = True __snake_case : int = self.tokenizer_class.from_pretrained(__a , **__a) __snake_case : int = self.rust_tokenizer_class.from_pretrained(__a , **__a) __snake_case : List[Any] = tokenizer_p.encode(__a , add_special_tokens=__a) __snake_case : Optional[Any] = tokenizer_r.encode(__a , add_special_tokens=__a) __snake_case : int = tokenizer_r.convert_ids_to_tokens(__a) __snake_case : List[Any] = tokenizer_p.convert_ids_to_tokens(__a) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__a , __a) self.assertListEqual(__a , __a) __snake_case : Tuple = False __snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a) __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(__a , **__a) __snake_case : str = tokenizer_r.encode(__a , add_special_tokens=__a) __snake_case : int = tokenizer_p.encode(__a , add_special_tokens=__a) __snake_case : str = tokenizer_r.convert_ids_to_tokens(__a) __snake_case : Any = tokenizer_p.convert_ids_to_tokens(__a) # it is expected that only the first Chinese character is not preceded by "##". __snake_case : Optional[Any] = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__a) ] self.assertListEqual(__a , __a) self.assertListEqual(__a , __a)
704
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main __snake_case : Any = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A , id=A )
61
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { '''configuration_pix2struct''': [ '''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Pix2StructConfig''', '''Pix2StructTextConfig''', '''Pix2StructVisionConfig''', ], '''processing_pix2struct''': ['''Pix2StructProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''Pix2StructImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Pix2StructPreTrainedModel''', '''Pix2StructForConditionalGeneration''', '''Pix2StructVisionModel''', '''Pix2StructTextModel''', ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
705
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list , A : int = 0 ) -> list: """simple docstring""" __snake_case : Dict = length or len(A ) __snake_case : Any = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: __snake_case : Tuple = list_data[i + 1], list_data[i] __snake_case : Optional[int] = True return list_data if not swapped else bubble_sort(A , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
706
'''simple docstring''' from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> int: """simple docstring""" if not is_accelerate_available(): return method __snake_case : Optional[Any] = version.parse(accelerate.__version__ ).base_version if version.parse(A ) < version.parse('0.17.0' ): return method def wrapper(self : Optional[Any] , *A : Optional[Any] , **A : Optional[int] ): if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ): self._hf_hook.pre_forward(self ) return method(self , *A , **A ) return wrapper
61
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class a_ ( unittest.TestCase ): def __init__(self , __a , __a=7 , __a=3 , __a=1_8 , __a=3_0 , __a=4_0_0 , __a=True , __a=None , __a=True , ) -> Tuple: """simple docstring""" __snake_case : str = size if size is not None else {'height': 1_8, 'width': 1_8} __snake_case : Optional[int] = parent __snake_case : List[Any] = batch_size __snake_case : str = num_channels __snake_case : Any = image_size __snake_case : Union[str, Any] = min_resolution __snake_case : Optional[Any] = max_resolution __snake_case : Optional[Any] = do_resize __snake_case : Optional[int] = size __snake_case : str = apply_ocr def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = LayoutLMvaImageProcessor if is_pytesseract_available() else None def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : Union[str, Any] = LayoutLMvaImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'apply_ocr')) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8}) __snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2}) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : List[str] = image_processing(image_inputs[0] , return_tensors='pt') self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __a) self.assertIsInstance(encoding.boxes , __a) # Test batched __snake_case : Union[str, Any] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : 
Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input __snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __snake_case : Any = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset __snake_case : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test') __snake_case : List[str] = Image.open(ds[0]['file']).convert('RGB') __snake_case : str = image_processing(__a , return_tensors='pt') self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __snake_case : Dict = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 
'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 __snake_case : List[str] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 
8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __a) self.assertListEqual(encoding.boxes , __a) # with apply_OCR = False __snake_case : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=__a) __snake_case : int = image_processing(__a , return_tensors='pt') self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
707
'''simple docstring'''

import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class a_ ( unittest.TestCase , UpperCamelCase_ ):
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """simple docstring"""
        __snake_case : List[str] = load_tool('text-to-speech')
        self.tool.setup()

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """simple docstring"""
        torch.manual_seed(0)
        __snake_case : Dict = self.tool('hey')
        __snake_case : List[Any] = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] ,
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) ,
            ))

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """simple docstring"""
        torch.manual_seed(0)
        __snake_case : Any = self.tool('hey')
        __snake_case : Any = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] ,
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) ,
            ))
61
0
'''simple docstring'''

def _SCREAMING_SNAKE_CASE ( A : int = 10 , A : int = 10_00 , A : bool = True ) -> int:
    """simple docstring"""
    assert (
        isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
    return min_val if option else max_val


def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> int:
    """simple docstring"""
    return int((number_a + number_a) / 2 )


def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> None:
    """simple docstring"""
    assert (
        isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)' )
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )

    def answer(A : int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...' )
    __snake_case : Dict = lower
    __snake_case : List[Any] = higher
    __snake_case : Optional[Any] = []
    while True:
        __snake_case : Any = get_avg(A , A )
        last_numbers.append(A )
        if answer(A ) == "low":
            __snake_case : Optional[Any] = number
        elif answer(A ) == "high":
            __snake_case : Dict = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""" )
    print(F"""details : {last_numbers!s}""" )


def _SCREAMING_SNAKE_CASE ( ) -> None:
    """simple docstring"""
    __snake_case : Optional[Any] = int(input('Enter lower value : ' ).strip() )
    __snake_case : List[Any] = int(input('Enter high value : ' ).strip() )
    __snake_case : List[str] = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(A , A , A )


if __name__ == "__main__":
    main()
708
'''simple docstring'''

import math


class a_ :
    def __init__(self , __a=0) -> Any:  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        __snake_case : List[str] = n
        __snake_case : Tuple = [
            [math.inf for j in range(0 , __a)] for i in range(0 , __a)
        ]  # adjacency matrix for weight
        __snake_case : Union[str, Any] = [
            [math.inf for j in range(0 , __a)] for i in range(0 , __a)
        ]  # dp[i][j] stores minimum distance from i to j

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple:
        """simple docstring"""
        __snake_case : Union[str, Any] = w

    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """simple docstring"""
        for k in range(0 , self.n):
            for i in range(0 , self.n):
                for j in range(0 , self.n):
                    __snake_case : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])

    def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]:
        """simple docstring"""
        return self.dp[u][v]


if __name__ == "__main__":
    __A = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 1_0)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 1_0)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
61
0
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__A = logging.get_logger(__name__)

__A = {
    '''facebook/deit-base-distilled-patch16-224''': (
        '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class a_ ( UpperCamelCase_ ):
    _snake_case = """deit"""

    def __init__(self , __a=7_6_8 , __a=1_2 , __a=1_2 , __a=3_0_7_2 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-12 , __a=2_2_4 , __a=1_6 , __a=3 , __a=True , __a=1_6 , **__a , ) -> Union[str, Any]:
        super().__init__(**__a)

        __snake_case : Optional[int] = hidden_size
        __snake_case : int = num_hidden_layers
        __snake_case : List[Any] = num_attention_heads
        __snake_case : Optional[int] = intermediate_size
        __snake_case : Tuple = hidden_act
        __snake_case : Optional[int] = hidden_dropout_prob
        __snake_case : Tuple = attention_probs_dropout_prob
        __snake_case : Optional[int] = initializer_range
        __snake_case : List[Any] = layer_norm_eps
        __snake_case : List[Any] = image_size
        __snake_case : Optional[Any] = patch_size
        __snake_case : List[Any] = num_channels
        __snake_case : Tuple = qkv_bias
        __snake_case : Optional[int] = encoder_stride


class a_ ( UpperCamelCase_ ):
    _snake_case = version.parse("""1.11""" )

    @property
    def SCREAMING_SNAKE_CASE__ (self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def SCREAMING_SNAKE_CASE__ (self) -> float:
        return 1E-4
709
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''DeiTFeatureExtractor'''] __A = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
710
'''simple docstring'''

from functools import lru_cache


@lru_cache
def _SCREAMING_SNAKE_CASE ( A : int ) -> int:
    """simple docstring"""
    if num < 0:
        raise ValueError('Number should not be negative.' )

    return 1 if num in (0, 1) else num * factorial(num - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
61
0
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a_ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" torch.manual_seed(0) __snake_case : Any = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.dummy_uncond_unet __snake_case : int = ScoreSdeVeScheduler() __snake_case : int = ScoreSdeVePipeline(unet=__a , scheduler=__a) sde_ve.to(__a) sde_ve.set_progress_bar_config(disable=__a) __snake_case : Tuple = torch.manual_seed(0) __snake_case : Any = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=__a).images __snake_case : Any = torch.manual_seed(0) __snake_case : Dict = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=__a , return_dict=__a)[ 0 ] __snake_case : str = image[0, -3:, -3:, -1] __snake_case : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) __snake_case : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Tuple = 'google/ncsnpp-church-256' __snake_case : Dict = UNetaDModel.from_pretrained(__a) __snake_case : List[Any] = ScoreSdeVeScheduler.from_pretrained(__a) __snake_case : Tuple = ScoreSdeVePipeline(unet=__a , scheduler=__a) sde_ve.to(__a) sde_ve.set_progress_bar_config(disable=__a) __snake_case : Optional[int] = torch.manual_seed(0) __snake_case : Optional[Any] = sde_ve(num_inference_steps=1_0 , output_type='numpy' , generator=__a).images __snake_case : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) __snake_case : Optional[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
711
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
0
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __A = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __A = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __A = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') __A = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __A = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __A = [ ('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''), ('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''), ('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''), ('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''), ('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''), ('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''), ('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''), ('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''), ('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''), ( '''zero-shot-object-detection''', '''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForZeroShotObjectDetection''', ), ('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''), ('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''), ('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''), ('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''), ( '''table-question-answering''', '''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForTableQuestionAnswering''', ), ('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''), ('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''), ( '''next-sentence-prediction''', '''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''', '''AutoModelForNextSentencePrediction''', ), ( '''audio-frame-classification''', '''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioFrameClassification''', ), ('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''), ( '''document-question-answering''', '''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForDocumentQuestionAnswering''', ), ( '''visual-question-answering''', '''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''', 
'''AutoModelForVisualQuestionAnswering''', ), ('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''), ( '''zero-shot-image-classification''', '''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForZeroShotImageClassification''', ), ('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''), ('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''), ('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''), ] def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> Optional[int]: """simple docstring""" __snake_case : Dict = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , A ) return [m.group(0 ) for m in matches] def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" __snake_case : List[str] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __snake_case : Optional[Any] = { config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __snake_case : Optional[Any] = collections.defaultdict(A ) __snake_case : List[str] = collections.defaultdict(A ) __snake_case : Optional[Any] = collections.defaultdict(A ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(A ): __snake_case : Tuple = None if _re_tf_models.match(A ) is not None: __snake_case : Tuple = tf_models __snake_case : str = _re_tf_models.match(A ).groups()[0] elif _re_flax_models.match(A ) is not None: __snake_case : int = flax_models __snake_case : Dict = _re_flax_models.match(A ).groups()[0] elif _re_pt_models.match(A ) is not None: __snake_case : int = pt_models __snake_case : int = _re_pt_models.match(A ).groups()[0] if lookup_dict is not None: while len(A ) > 0: if attr_name in model_prefix_to_model_type: __snake_case : Optional[int] = True break # Try again after removing the last word in the name __snake_case : Tuple = ''.join(camel_case_split(A )[:-1] ) __snake_case : Optional[Any] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __snake_case : List[Any] = list(A ) all_models.sort() __snake_case : List[Any] = {'model_type': all_models} __snake_case : str = [pt_models[t] for t in all_models] __snake_case : List[str] = [tf_models[t] for t in all_models] __snake_case : Optional[int] = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __snake_case : Optional[Any] = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __snake_case : Any = 'AutoProcessor' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __snake_case : Optional[Any] = 'AutoTokenizer' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __snake_case : Optional[int] = 'AutoFeatureExtractor' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__snake_case : Optional[Any] = 'AutoTokenizer' __snake_case : Optional[int] = [processors[t] for t in all_models] return pd.DataFrame(A ) def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] ) -> Optional[int]: """simple docstring""" __snake_case : str = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __snake_case : Any = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""] __snake_case : Optional[Any] = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(A , A , A ): # The type of pipeline may not exist in this framework if not hasattr(A , A ): continue # First extract all model_names __snake_case : Any = [] for name in getattr(A , A ).values(): if isinstance(A , A ): model_names.append(A ) else: model_names.extend(list(A ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def _SCREAMING_SNAKE_CASE ( A : Optional[Any] , A : List[Any] ) -> Dict: """simple docstring""" __snake_case : Optional[Any] = get_frameworks_table() __snake_case : Optional[Any] = Dataset.from_pandas(A ) __snake_case : Optional[int] = hf_hub_download( 'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=A ) __snake_case : Optional[int] = Dataset.from_json(A ) __snake_case : int = { tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class']) for i in range(len(A ) ) } __snake_case : int = update_pipeline_and_auto_class_table(A ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. __snake_case : Tuple = sorted(table.keys() ) __snake_case : Tuple = pd.DataFrame( { 'model_class': model_classes, 'pipeline_tag': [table[m][0] for m in model_classes], 'auto_class': [table[m][1] for m in model_classes], } ) __snake_case : Any = Dataset.from_pandas(A ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(A , 'frameworks.json' ) ) tags_dataset.to_json(os.path.join(A , 'pipeline_tags.json' ) ) if commit_sha is not None: __snake_case : int = ( F"""Update with commit {commit_sha}\n\nSee: """ F"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: __snake_case : Tuple = 'Update' upload_folder( repo_id='huggingface/transformers-metadata' , folder_path=A , repo_type='dataset' , token=A , commit_message=A , ) def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __snake_case : Any = transformers_module.pipelines.SUPPORTED_TASKS __snake_case : List[Any] = [] for key in pipeline_tasks: if key not in in_table: __snake_case : str = pipeline_tasks[key]['pt'] if isinstance(A , (list, tuple) ): __snake_case : List[str] = model[0] __snake_case : Dict = model.__name__ if model not in in_table.values(): missing.append(A ) if len(A ) > 0: __snake_case : Union[str, Any] = ', '.join(A ) raise ValueError( 'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ' F"""`utils/update_metadata.py`: {msg}. 
Please add them!""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''') parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''') parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''') __A = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
712
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = 
align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
0
'''simple docstring'''

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def _SCREAMING_SNAKE_CASE ( ) -> int:
    """simple docstring"""
    __snake_case : List[str] = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=A , default=1 , help='Number of TPU cores to use (1 or 8).' )

    # positional
    parser.add_argument(
        'training_script' ,
        type=A ,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) ,
    )

    # rest from the training program
    parser.add_argument('training_script_args' , nargs=A )

    return parser.parse_args()


def _SCREAMING_SNAKE_CASE ( ) -> Any:
    """simple docstring"""
    __snake_case : Dict = parse_args()

    # Import training_script as a module.
    __snake_case : List[str] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    __snake_case : Optional[Any] = script_fpath.stem
    __snake_case : Optional[int] = importlib.import_module(A )

    # Patch sys.argv
    __snake_case : Union[str, Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


if __name__ == "__main__":
    main()
713
'''simple docstring'''

def _SCREAMING_SNAKE_CASE ( A : list ) -> list:
    """simple docstring"""
    __snake_case : Tuple = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        __snake_case : Optional[Any] = True
        for i in range(0 , len(A ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                __snake_case ,__snake_case : int = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                __snake_case : List[Any] = False

        for i in range(1 , len(A ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                __snake_case : Any = False

    return input_list


if __name__ == "__main__":
    print('''Enter list to be sorted''')
    __A = [int(x) for x in input().split()]  # inputing elements of the list in one line
    __A = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
61
0
'''simple docstring'''

from collections.abc import Callable


def _SCREAMING_SNAKE_CASE ( A : Callable[[float], float] , A : float , A : float ) -> float:
    __snake_case : float = a
    __snake_case : float = b
    if function(A ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(A ) == 0:
        return b
    elif (
        function(A ) * function(A ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        __snake_case : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(A ) == 0:
                return mid
            elif function(A ) * function(A ) < 0:
                __snake_case : Optional[int] = mid
            else:
                __snake_case : Any = mid
            __snake_case : Optional[Any] = start + (end - start) / 2.0
        return mid


def _SCREAMING_SNAKE_CASE ( A : float ) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_0_0_0))

    import doctest

    doctest.testmod()
714
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger() def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A ) else: __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A ) if hidden_sizes == 1_92: __snake_case : int = timm.create_model('levit_192' , pretrained=A ) if hidden_sizes == 2_56: __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A ) if hidden_sizes == 3_84: __snake_case : int = timm.create_model('levit_384' , pretrained=A ) from_model.eval() __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval() __snake_case : int = OrderedDict() __snake_case : Optional[Any] = from_model.state_dict() __snake_case : Tuple = list(from_model.state_dict().keys() ) __snake_case : List[str] = list(our_model.state_dict().keys() ) print(len(A ) , len(A ) ) for i in range(len(A ) ): __snake_case : Optional[int] = weights[og_keys[i]] our_model.load_state_dict(A ) __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) ) __snake_case : Union[str, Any] = from_model(A ) __snake_case : List[str] = our_model(A ).logits assert torch.allclose(A , A ), "The model logits don't match the original one." __snake_case : int = name print(A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __snake_case : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = 'imagenet-1k-id2label.json' __snake_case : Tuple = 10_00 __snake_case : Dict = (1, num_labels) __snake_case : List[str] = 'huggingface/label-files' __snake_case : Any = num_labels __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A ) __snake_case : Dict = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } __snake_case : Union[str, Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( 
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A ) return config, expected_shape if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __A = parser.parse_args() __A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
0
'''simple docstring''' import math import sys def _SCREAMING_SNAKE_CASE ( A : str ) -> str: """simple docstring""" __snake_case : Dict = '' try: with open(A , 'rb' ) as binary_file: __snake_case : Optional[int] = binary_file.read() for dat in data: __snake_case : str = F"""{dat:08b}""" result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def _SCREAMING_SNAKE_CASE ( A : str ) -> str: """simple docstring""" __snake_case : str = {'0': '0', '1': '1'} __snake_case : Tuple = '', '' __snake_case : Optional[int] = len(A ) for i in range(len(A ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __snake_case : Any = lexicon[curr_string] result += last_match_id __snake_case : Tuple = last_match_id + '0' if math.loga(A ).is_integer(): __snake_case : Optional[Any] = {} for curr_key in list(A ): __snake_case : Any = lexicon.pop(A ) __snake_case : str = new_lex __snake_case : Dict = last_match_id + '1' index += 1 __snake_case : Tuple = '' return result def _SCREAMING_SNAKE_CASE ( A : str , A : str ) -> None: """simple docstring""" __snake_case : int = 8 try: with open(A , 'wb' ) as opened_file: __snake_case : str = [ to_write[i : i + byte_length] for i in range(0 , len(A ) , A ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(A , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def _SCREAMING_SNAKE_CASE ( A : str ) -> str: """simple docstring""" __snake_case : Optional[int] = 0 for letter in data_bits: if letter == "1": break counter += 1 __snake_case : Any = data_bits[counter:] __snake_case : str = data_bits[counter + 1 :] return data_bits def _SCREAMING_SNAKE_CASE ( A : str , A : str ) -> None: """simple docstring""" __snake_case : List[Any] = read_file_binary(A ) __snake_case : Tuple = remove_prefix(A ) __snake_case : Optional[Any] = decompress_data(A ) write_file_binary(A , A ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
715
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride __snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: 
"""simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) 
self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.assertIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else
outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
0
'''simple docstring''' from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __A = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class a_ ( UpperCamelCase_ ): def __init__(self , __a = 1_0_1) -> Dict: """simple docstring""" __snake_case : Union[str, Any] = length def __len__(self) -> List[str]: """simple docstring""" return self.length def __getitem__(self , __a) -> int: """simple docstring""" return i class a_ : def __call__(self , __a) -> List[str]: """simple docstring""" return {"input_ids": torch.tensor(__a), "labels": torch.tensor(__a)} class a_ ( nn.Module ): def __init__(self) -> Union[str, Any]: """simple docstring""" super().__init__() # Add some (unused) params otherwise DDP will complain. __snake_case : List[str] = nn.Linear(1_2_0 , 8_0) def SCREAMING_SNAKE_CASE__ (self , __a , __a=None) -> str: """simple docstring""" if labels is not None: return torch.tensor(0.0 , device=input_ids.device), input_ids else: return input_ids class a_ ( UpperCamelCase_ ): @require_torch_neuroncore def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = F"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Any = F"""--output_dir {output_dir}""".split() __snake_case : Union[str, Any] = ['torchrun'] + distributed_args + args execute_subprocess_async(__a , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class a_ ( UpperCamelCase_ ): @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Dict = F"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Optional[Any] = F"""--output_dir {output_dir}""".split() __snake_case : str = ['torchrun'] + distributed_args + args execute_subprocess_async(__a , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __A = HfArgumentParser((TrainingArguments,)) __A = parser.parse_args_into_dataclasses()[0] logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [1_0_1, 4_0, 7]: __A = DummyDataset(dataset_length) def _SCREAMING_SNAKE_CASE ( A : EvalPrediction ) -> Dict: """simple docstring""" __snake_case : str = list(range(len(A ) ) ) __snake_case : Dict = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( 'Predictions and/or labels do not match expected results:\n - predictions: ' F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" ) return {"success": success} __A = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __A = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __A = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __A = 2 __A = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __A = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __A = None
716
'''simple docstring''' __A = {str(digit): digit**5 for digit in range(1_0)} def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" return sum( number for number in range(10_00 , 1_00_00_00 ) if number == digits_fifth_powers_sum(A ) ) if __name__ == "__main__": print(solution())
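A quick, self-contained check of the property the Project Euler solution above searches for; the names below are illustrative, not part of the file.

# Illustrative check: 4150 is one of the numbers equal to the sum of the
# fifth powers of its digits.
digits_fifth_power = {str(d): d**5 for d in range(10)}

def fifth_power_digit_sum(n: int) -> int:
    return sum(digits_fifth_power[ch] for ch in str(n))

assert fifth_power_digit_sum(4150) == 4**5 + 1**5 + 5**5 + 0**5 == 4150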
61
0
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : str = get_activation('swish') self.assertIsInstance(__a , nn.SiLU) self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : List[Any] = get_activation('silu') self.assertIsInstance(__a , nn.SiLU) self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = get_activation('mish') self.assertIsInstance(__a , nn.Mish) self.assertEqual(act(torch.tensor(-2_0_0 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = get_activation('gelu') self.assertIsInstance(__a , nn.GELU) self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa)).item() , 2_0)
717
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a_ : _snake_case = 42 _snake_case = None _snake_case = None def _SCREAMING_SNAKE_CASE ( ) -> Node | None: """simple docstring""" __snake_case : str = Node(1 ) __snake_case : Tuple = Node(2 ) __snake_case : Optional[int] = Node(3 ) __snake_case : List[str] = Node(4 ) __snake_case : List[str] = Node(5 ) return tree def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] if root is None: return output __snake_case : Optional[int] = deque([root] ) while process_queue: __snake_case : List[str] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] __snake_case : list[Sequence[Node | None]] = [] __snake_case : List[Any] = 0 __snake_case : int = height(A ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(A , A ) ) __snake_case : int = 1 else: output.append(get_nodes_from_right_to_left(A , A ) ) __snake_case : Tuple = 0 return output def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing. """simple docstring""" __snake_case : Optional[int] = make_tree() print(F"""In-order Traversal: {inorder(A )}""" ) print(F"""Pre-order Traversal: {preorder(A )}""" ) print(F"""Post-order Traversal: {postorder(A )}""" , '\n' ) print(F"""Height of Tree: {height(A )}""" , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(A ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(A ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(A , level=A ) ) print('\nZigZag order Traversal: ' ) print(zigzag(A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
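A minimal, runnable reference for the traversal orders produced by the file above, assuming make_tree builds the usual five-node tree (1 at the root, 2 and 3 as its children, 4 and 5 under 2); the exact wiring is not visible in the obfuscated assignments, so treat that layout as an assumption.

from __future__ import annotations
from dataclasses import dataclass

@dataclass
class SimpleNode:
    data: int
    left: SimpleNode | None = None
    right: SimpleNode | None = None

def inorder_ref(node: SimpleNode | None) -> list[int]:
    # left subtree, then root, then right subtree
    return [*inorder_ref(node.left), node.data, *inorder_ref(node.right)] if node else []

root = SimpleNode(1, SimpleNode(2, SimpleNode(4), SimpleNode(5)), SimpleNode(3))
assert inorder_ref(root) == [4, 2, 5, 1, 3]
# Under the same assumed layout: pre-order [1, 2, 4, 5, 3], post-order [4, 5, 2, 3, 1],
# level order [1, 2, 3, 4, 5], zigzag [[1], [3, 2], [4, 5]].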
61
0
'''simple docstring''' import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version __A = get_logger(__name__) class a_ : _snake_case = """dummy_data""" _snake_case = """datasets""" _snake_case = False def __init__(self , __a , __a , __a , __a = None , __a = False , __a = True , __a = None , ) -> str: """simple docstring""" __snake_case : Union[str, Any] = 0 __snake_case : str = dataset_name __snake_case : str = cache_dir __snake_case : Tuple = use_local_dummy_data __snake_case : int = config # download_callbacks take a single url as input __snake_case : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __snake_case : List[str] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __snake_case : Optional[int] = str(__a) # to be downloaded __snake_case : List[str] = None __snake_case : List[str] = None @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" if self._dummy_file is None: __snake_case : Tuple = self.download_dummy_data() return self._dummy_file @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name) # structure is dummy / version_name return os.path.join('dummy' , self.version_name) @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return os.path.join(self.dummy_data_folder , 'dummy_data.zip') def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : str = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __snake_case : int = cached_path( __a , cache_dir=self.cache_dir , extract_compressed_file=__a , force_extract=__a) return os.path.join(__a , self.dummy_file_name) @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file) @property def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" if self._bucket_url is None: __snake_case : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/')) return self._bucket_url @property def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" if os.path.isdir(self.dummy_file): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '/').split('/')[:-1]) def SCREAMING_SNAKE_CASE__ (self , __a , *__a) -> Optional[Any]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested __snake_case : int = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __snake_case : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__a , __a): return self.create_dummy_data_dict(__a , __a) elif isinstance(__a , (list, tuple)): return self.create_dummy_data_list(__a , __a) else: return self.create_dummy_data_single(__a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , *__a) -> Optional[int]: """simple docstring""" return self.download_and_extract(__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> List[Any]: """simple docstring""" return self.download_and_extract(__a) def SCREAMING_SNAKE_CASE__ (self , __a , *__a , **__a) -> Union[str, Any]: """simple docstring""" return path def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return {} def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__a , __a): for single_url in single_urls: download_callback(__a) else: __snake_case : Optional[Any] = single_urls download_callback(__a) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__a , __a): __snake_case : Dict = [os.path.join(__a , urllib.parse.quote_plus(Path(__a).name)) for x in single_urls] else: __snake_case : Optional[Any] = single_urls __snake_case : List[str] = os.path.join(__a , urllib.parse.quote_plus(Path(__a).name)) __snake_case : Optional[Any] = value # make sure that values are unique if all(isinstance(__a , __a) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( dummy_data_dict.values()): # append key to value to make its name unique __snake_case : Optional[int] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> List[Any]: """simple docstring""" __snake_case : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __snake_case : int = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , __a)) for url in data_url) __snake_case : Any = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url) if data_url and (is_tf_records or is_pubmed_records): __snake_case : Optional[Any] = [data_url[0]] * len(__a) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__a) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __snake_case : List[Any] = os.path.join(__a , urllib.parse.quote_plus(single_url.split('/')[-1])) dummy_data_list.append(__a) return dummy_data_list def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(__a) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __snake_case : Tuple = os.path.join(__a , 
urllib.parse.quote_plus(data_url.split('/')[-1])) if os.path.exists(__a) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" def _iter_archive_members(__a): # this preserves the order of the members inside the ZIP archive __snake_case : Optional[Any] = Path(self.dummy_file).parent __snake_case : int = path.relative_to(__a) with ZipFile(self.local_path_to_dummy_data) as zip_file: __snake_case : List[Any] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix()): yield dummy_parent_path.joinpath(__a) __snake_case : Tuple = Path(__a) __snake_case : int = _iter_archive_members(__a) if self.use_local_dummy_data else path.rglob('*') for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__')): yield file_path.relative_to(__a).as_posix(), file_path.open('rb') def SCREAMING_SNAKE_CASE__ (self , __a) -> Optional[int]: """simple docstring""" if not isinstance(__a , __a): __snake_case : Union[str, Any] = [paths] for path in paths: if os.path.isfile(__a): if os.path.basename(__a).startswith(('.', '__')): return yield path else: for dirpath, dirnames, filenames in os.walk(__a): if os.path.basename(__a).startswith(('.', '__')): continue dirnames.sort() for filename in sorted(__a): if filename.startswith(('.', '__')): continue yield os.path.join(__a , __a)
718
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int 
= w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] 
elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
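A small, self-contained stand-in for the operations the Vector/Matrix classes above expose (dot product, Euclidean length, and the 2x2 determinant base case of the cofactor expansion); the function names are illustrative and this is not an import of the obfuscated classes themselves.

import math

def dot(u: list[float], v: list[float]) -> float:
    assert len(u) == len(v), "must have the same size"
    return sum(a * b for a, b in zip(u, v))

def euclidean_length(u: list[float]) -> float:
    return math.sqrt(sum(c * c for c in u))

def det2(m: list[list[float]]) -> float:
    # determinant of a 2x2 matrix, the base case of the recursive cofactor expansion
    return m[0][0] * m[1][1] - m[0][1] * m[1][0]

assert dot([1, 2, 3], [4, 5, 6]) == 32
assert euclidean_length([3, 4]) == 5.0
assert det2([[1, 2], [3, 4]]) == -2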
61
0
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : int = SwinConfig() __snake_case : List[str] = swin_name.split('_' ) __snake_case : Any = name_split[1] __snake_case : List[str] = int(name_split[4] ) __snake_case : Optional[Any] = int(name_split[3][-1] ) if model_size == "tiny": __snake_case : Dict = 96 __snake_case : Optional[int] = (2, 2, 6, 2) __snake_case : Any = (3, 6, 12, 24) elif model_size == "small": __snake_case : List[str] = 96 __snake_case : List[Any] = (2, 2, 18, 2) __snake_case : Any = (3, 6, 12, 24) elif model_size == "base": __snake_case : Tuple = 1_28 __snake_case : Tuple = (2, 2, 18, 2) __snake_case : Dict = (4, 8, 16, 32) else: __snake_case : Optional[Any] = 1_92 __snake_case : Optional[int] = (2, 2, 18, 2) __snake_case : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: __snake_case : Dict = 2_18_41 else: __snake_case : Optional[int] = 10_00 __snake_case : Union[str, Any] = 'huggingface/label-files' __snake_case : Tuple = 'imagenet-1k-id2label.json' __snake_case : Optional[int] = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : str = {int(A ): v for k, v in idalabel.items()} __snake_case : Optional[int] = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Union[str, Any] = img_size __snake_case : Any = num_classes __snake_case : int = embed_dim __snake_case : Optional[int] = depths __snake_case : Optional[Any] = num_heads __snake_case : List[str] = window_size return config def _SCREAMING_SNAKE_CASE ( A : List[str] ) -> Union[str, Any]: """simple docstring""" if "patch_embed.proj" in name: __snake_case : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __snake_case : Optional[Any] = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __snake_case : int = 'encoder.' + name if "attn.proj" in name: __snake_case : List[Any] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __snake_case : List[str] = name.replace('attn' , 'attention.self' ) if "norm1" in name: __snake_case : int = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __snake_case : Union[str, Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __snake_case : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __snake_case : List[str] = name.replace('mlp.fc2' , 'output.dense' ) if name == "norm.weight": __snake_case : Any = 'layernorm.weight' if name == "norm.bias": __snake_case : List[str] = 'layernorm.bias' if "head" in name: __snake_case : Union[str, Any] = name.replace('head' , 'classifier' ) else: __snake_case : Union[str, Any] = 'swin.' + name return name def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Optional[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): __snake_case : str = orig_state_dict.pop(A ) if "mask" in key: continue elif "qkv" in key: __snake_case : Union[str, Any] = key.split('.' 
) __snake_case : Any = int(key_split[1] ) __snake_case : Optional[int] = int(key_split[3] ) __snake_case : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __snake_case : List[Any] = val[:dim, :] __snake_case : Any = val[ dim : dim * 2, : ] __snake_case : str = val[-dim:, :] else: __snake_case : Dict = val[ :dim ] __snake_case : List[str] = val[ dim : dim * 2 ] __snake_case : Tuple = val[ -dim: ] else: __snake_case : Dict = val return orig_state_dict def _SCREAMING_SNAKE_CASE ( A : int , A : List[Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = timm.create_model(A , pretrained=A ) timm_model.eval() __snake_case : Any = get_swin_config(A ) __snake_case : Optional[int] = SwinForImageClassification(A ) model.eval() __snake_case : List[Any] = convert_state_dict(timm_model.state_dict() , A ) model.load_state_dict(A ) __snake_case : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' __snake_case : int = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) ) __snake_case : Dict = Image.open(requests.get(A , stream=A ).raw ) __snake_case : str = image_processor(images=A , return_tensors='pt' ) __snake_case : Optional[Any] = timm_model(inputs['pixel_values'] ) __snake_case : Optional[int] = model(**A ).logits assert torch.allclose(A , A , atol=1e-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swin_name''', default='''swin_tiny_patch4_window7_224''', type=str, help='''Name of the Swin timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __A = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
719
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification __A = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co __A = '''main''' # Default branch name __A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) __A = '''aaaaaaa''' # This commit does not exist, so we should 404. __A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes __A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" print('Bonjour!' ) yield print('Au revoir!' ) class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers') is not None class a_ ( unittest.TestCase ): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" with ContextManagers([]): print('Transformers are awesome!') # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" with ContextManagers([context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" with ContextManagers([context_fr(), context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n') @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_tf def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , 
['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_flax def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , [])
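A rough illustration of the behaviour the tests above exercise: label discovery inspects the model's forward signature for label-like parameters. This is a hedged sketch of the idea only, not the transformers implementation of find_labels.

import inspect

def find_label_args(model_class) -> list[str]:
    # For a PyTorch class inspect `forward`; TF models would use `call`, Flax `__call__`.
    signature = inspect.signature(model_class.forward)
    return [
        name
        for name in signature.parameters
        if "label" in name or name in ("start_positions", "end_positions")
    ]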
61
0
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class a_ : def __init__(self , __a , __a=sys.maxsize) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = 'bilinear' __snake_case : Optional[int] = max_size __snake_case : str = short_edge_length def __call__(self , __a) -> int: """simple docstring""" __snake_case : str = [] for img in imgs: __snake_case : str = img.shape[:2] # later: provide list and randomly choose index for resize __snake_case : Any = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img __snake_case : int = size * 1.0 / min(__a , __a) if h < w: __snake_case : Optional[int] = size, scale * w else: __snake_case : Tuple = scale * h, size if max(__a , __a) > self.max_size: __snake_case : int = self.max_size * 1.0 / max(__a , __a) __snake_case : List[Any] = newh * scale __snake_case : List[Any] = neww * scale __snake_case : Dict = int(neww + 0.5) __snake_case : Optional[Any] = int(newh + 0.5) if img.dtype == np.uinta: __snake_case : Tuple = Image.fromarray(__a) __snake_case : List[str] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) __snake_case : Optional[int] = np.asarray(__a) else: __snake_case : List[str] = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw __snake_case : Any = nn.functional.interpolate( __a , (newh, neww) , mode=self.interp_method , align_corners=__a).squeeze(0) img_augs.append(__a) return img_augs class a_ : def __init__(self , __a) -> int: """simple docstring""" __snake_case : Union[str, Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) __snake_case : str = cfg.INPUT.FORMAT __snake_case : Any = cfg.SIZE_DIVISIBILITY __snake_case : Union[str, Any] = cfg.PAD_VALUE __snake_case : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST __snake_case : List[Any] = cfg.MODEL.DEVICE __snake_case : List[str] = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __snake_case : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __snake_case : Optional[Any] = lambda __a: (x - self.pixel_mean) / self.pixel_std def SCREAMING_SNAKE_CASE__ (self , __a) -> Dict: """simple docstring""" __snake_case : int = tuple(max(__a) for s in zip(*[img.shape for img in images])) __snake_case : int = [im.shape[-2:] for im in images] __snake_case : Dict = [ nn.functional.pad( __a , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(__a , __a) ] return torch.stack(__a), torch.tensor(__a) def __call__(self , __a , __a=False) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(__a , __a): __snake_case : List[Any] = [images] if single_image: assert len(__a) == 1 for i in range(len(__a)): if isinstance(images[i] , torch.Tensor): images.insert(__a , images.pop(__a).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( __a , torch.as_tensor(img_tensorize(images.pop(__a) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge __snake_case : List[Any] = torch.tensor([im.shape[:2] for im in images]) __snake_case : Optional[Any] = self.aug(__a) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 
1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic __snake_case : int = [self.normalizer(__a) for x in images] # now pad them to do the following operations __snake_case : Tuple = self.pad(__a) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad __snake_case : Any = torch.true_divide(__a , __a) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _SCREAMING_SNAKE_CASE ( A : Any , A : List[Any] ) -> Union[str, Any]: """simple docstring""" boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _SCREAMING_SNAKE_CASE ( A : Dict , A : Tuple[int, int] ) -> Optional[int]: """simple docstring""" assert torch.isfinite(A ).all(), "Box tensor contains infinite or NaN!" __snake_case : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=A ) tensor[:, 1].clamp_(min=0 , max=A ) tensor[:, 2].clamp_(min=0 , max=A ) tensor[:, 3].clamp_(min=0 , max=A )
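A stand-alone sketch of the padding step used above: every image in a batch is padded on the bottom and right to the largest height/width in the batch before stacking. The helper name and defaults are illustrative only.

import torch
from torch import nn

def pad_to_common_size(images: list[torch.Tensor], pad_value: float = 0.0):
    # images are CHW tensors with a shared channel count but varying spatial sizes
    max_h = max(im.shape[-2] for im in images)
    max_w = max(im.shape[-1] for im in images)
    sizes = [list(im.shape[-2:]) for im in images]
    padded = [
        # pad = (left, right, top, bottom) for the last two dimensions
        nn.functional.pad(im, [0, max_w - im.shape[-1], 0, max_h - im.shape[-2]], value=pad_value)
        for im in images
    ]
    return torch.stack(padded), torch.tensor(sizes)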
720
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
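A simplified illustration of the lazy-import pattern the module above relies on: symbols are resolved to their submodule only on first attribute access, so importing the package stays cheap when torch (and therefore TimmBackbone) is not needed. This is a sketch of the idea, not transformers' actual _LazyModule implementation.

import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, item: str):
        submodule = self._symbol_to_module.get(item)
        if submodule is None:
            raise AttributeError(item)
        # the heavy import only happens here, on first access
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, item)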
61
0
'''simple docstring''' import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline __A = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def _SCREAMING_SNAKE_CASE ( A : int , A : tuple , A : Path , A : List[Any] , A : List[str] , A : List[str] , A : Dict , A : Any=False , ) -> Dict: """simple docstring""" output_path.parent.mkdir(parents=A , exist_ok=A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( A , A , f=output_path.as_posix() , input_names=A , output_names=A , dynamic_axes=A , do_constant_folding=A , use_external_data_format=A , enable_onnx_checker=A , opset_version=A , ) else: export( A , A , f=output_path.as_posix() , input_names=A , output_names=A , dynamic_axes=A , do_constant_folding=A , opset_version=A , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( A : str , A : str , A : int , A : bool = False ) -> int: """simple docstring""" __snake_case : Optional[Any] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __snake_case : Union[str, Any] = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: __snake_case : int = 'cpu' __snake_case : Any = StableDiffusionPipeline.from_pretrained(A , torch_dtype=A ).to(A ) __snake_case : Optional[int] = Path(A ) # TEXT ENCODER __snake_case : Any = pipeline.text_encoder.config.max_position_embeddings __snake_case : Tuple = pipeline.text_encoder.config.hidden_size __snake_case : str = pipeline.tokenizer( 'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=A , return_tensors='pt' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=A , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'sequence'}, } , opset=A , ) del pipeline.text_encoder # UNET __snake_case : List[str] = pipeline.unet.config.in_channels __snake_case : str = pipeline.unet.config.sample_size __snake_case : List[str] = output_path / 'unet' / 'model.onnx' onnx_export( pipeline.unet , model_args=( torch.randn(2 , A , A , A ).to(device=A , dtype=A ), torch.randn(2 ).to(device=A , dtype=A ), torch.randn(2 , A , A ).to(device=A , dtype=A ), False, ) , output_path=A , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={ 'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, 'timestep': {0: 'batch'}, 'encoder_hidden_states': {0: 'batch', 1: 'sequence'}, } , opset=A , use_external_data_format=A , ) __snake_case : Optional[int] = str(unet_path.absolute().as_posix() ) __snake_case : List[Any] = os.path.dirname(A ) __snake_case : List[str] = onnx.load(A ) # clean up existing tensor files shutil.rmtree(A ) os.mkdir(A ) # collate external tensor files into one onnx.save_model( A , A , save_as_external_data=A , all_tensors_to_one_file=A , location='weights.pb' , convert_attribute=A , ) del pipeline.unet # VAE ENCODER __snake_case : Any = pipeline.vae __snake_case : Optional[Any] = 
vae_encoder.config.in_channels __snake_case : List[Any] = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder __snake_case : Dict = lambda A , A : vae_encoder.encode(A , A )[0].sample() onnx_export( A , model_args=( torch.randn(1 , A , A , A ).to(device=A , dtype=A ), False, ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={ 'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=A , ) # VAE DECODER __snake_case : List[str] = pipeline.vae __snake_case : Any = vae_decoder.config.latent_channels __snake_case : Optional[Any] = vae_decoder.config.out_channels # forward only through the decoder part __snake_case : List[str] = vae_encoder.decode onnx_export( A , model_args=( torch.randn(1 , A , A , A ).to(device=A , dtype=A ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=A , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: __snake_case : int = pipeline.safety_checker __snake_case : List[str] = safety_checker.config.vision_config.num_channels __snake_case : str = safety_checker.config.vision_config.image_size __snake_case : Tuple = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , A , A , A , ).to(device=A , dtype=A ), torch.randn(1 , A , A , A ).to(device=A , dtype=A ), ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={ 'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, 'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'}, } , opset=A , ) del pipeline.safety_checker __snake_case : Any = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' ) __snake_case : Optional[Any] = pipeline.feature_extractor else: __snake_case : List[str] = None __snake_case : List[str] = None __snake_case : Dict = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=A , feature_extractor=A , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(A ) print('ONNX pipeline saved to' , A ) del pipeline del onnx_pipeline __snake_case : Any = OnnxStableDiffusionPipeline.from_pretrained(A , provider='CPUExecutionProvider' ) print('ONNX pipeline is loadable' ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=1_4, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') __A = parser.parse_args() 
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
721
'''simple docstring'''

def _SCREAMING_SNAKE_CASE ( A : int ) -> int:
    """simple docstring"""
    __snake_case : str = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact


def _SCREAMING_SNAKE_CASE ( A : int ) -> int:
    """simple docstring"""
    __snake_case : Union[str, Any] = 0
    while number > 0:
        __snake_case : Dict = number % 10
        sum_of_digits += last_digit
        __snake_case : Union[str, Any] = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def _SCREAMING_SNAKE_CASE ( A : int = 1_00 ) -> int:
    """simple docstring"""
    __snake_case : List[Any] = factorial(A )
    __snake_case : Dict = split_and_add(A )
    return result


if __name__ == "__main__":
    print(solution(int(input('''Enter the Number: ''').strip())))
61
0
'''simple docstring'''
import functools


def _SCREAMING_SNAKE_CASE ( A : str , A : str ) -> int:
    """simple docstring"""
    __snake_case : int = len(A )
    __snake_case : Optional[Any] = len(A )

    @functools.cache
    def min_distance(A : int , A : int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_worda - indexa
        # if second word index is overflow - delete all from the first word
        if indexa >= len_worda:
            return len_worda - indexa
        __snake_case : str = int(worda[indexa] != worda[indexa] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , A ) ,
            1 + min_distance(A , indexa + 1 ) ,
            diff + min_distance(indexa + 1 , indexa + 1 ) ,
        )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
700
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class a_ ( unittest.TestCase ): def __init__(self , __a , __a=7 , __a=3 , __a=1_8 , __a=3_0 , __a=4_0_0 , __a=True , __a=None , __a=True , __a=None , __a=True , __a=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __a=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __a=True , ) -> List[Any]: """simple docstring""" __snake_case : Tuple = size if size is not None else {'height': 2_2_4, 'width': 2_2_4} __snake_case : Any = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} __snake_case : Optional[int] = parent __snake_case : Dict = batch_size __snake_case : str = num_channels __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = min_resolution __snake_case : Tuple = max_resolution __snake_case : Optional[int] = do_resize __snake_case : Optional[int] = size __snake_case : Union[str, Any] = do_center_crop __snake_case : List[Any] = crop_size __snake_case : int = do_normalize __snake_case : Optional[Any] = image_mean __snake_case : str = image_std __snake_case : Optional[Any] = do_convert_rgb def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def SCREAMING_SNAKE_CASE__ (self , __a=False , __a=False , __a=False) -> List[str]: """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __snake_case : Optional[int] = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: __snake_case : Dict = [] for i in range(self.batch_size): __snake_case ,__snake_case : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __snake_case : int = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs] if torchify: __snake_case : List[Any] = [torch.from_numpy(__a) for x in image_inputs] return image_inputs @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__a) @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 
'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4}) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8}) __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4) self.assertEqual(image_processor.size , {'shortest_edge': 4_2}) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4}) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : List[Any] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Union[str, Any] = 
image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__a) __snake_case : List[Any] = 3 @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
61
0
'''simple docstring'''
from __future__ import annotations


def _SCREAMING_SNAKE_CASE ( A : list , A : int , A : int , A : int ) -> list:
    """simple docstring"""
    __snake_case : List[Any] = []
    __snake_case : Optional[int] = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    __snake_case : Optional[int] = result + left + right
    return input_list


def _SCREAMING_SNAKE_CASE ( A : list ) -> list:
    """simple docstring"""
    if len(A ) <= 1:
        return input_list
    __snake_case : Any = list(A )
    # iteration for two-way merging
    __snake_case : Tuple = 2
    while p <= len(A ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(A ) , A ):
            __snake_case : Union[str, Any] = i
            __snake_case : Tuple = i + p - 1
            __snake_case : Dict = (low + high + 1) // 2
            __snake_case : int = merge(A , A , A , A )
        # final merge of last two parts
        if p * 2 >= len(A ):
            __snake_case : str = i
            __snake_case : Any = merge(A , 0 , A , len(A ) - 1 )
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    __A = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        __A = []
    else:
        __A = [int(item.strip()) for item in user_input.split(''',''')]
    print(iter_merge_sort(unsorted))
701
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__A = logging.get_logger(__name__)

__A = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class a_ ( UpperCamelCase_ ):
    _snake_case = """vit_msn"""

    def __init__(self , __a=7_6_8 , __a=1_2 , __a=1_2 , __a=3_0_7_2 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-06 , __a=2_2_4 , __a=1_6 , __a=3 , __a=True , **__a , ) -> Any:
        """simple docstring"""
        super().__init__(**__a)

        __snake_case : List[str] = hidden_size
        __snake_case : Optional[int] = num_hidden_layers
        __snake_case : Optional[Any] = num_attention_heads
        __snake_case : str = intermediate_size
        __snake_case : List[str] = hidden_act
        __snake_case : List[Any] = hidden_dropout_prob
        __snake_case : Tuple = attention_probs_dropout_prob
        __snake_case : List[str] = initializer_range
        __snake_case : Optional[int] = layer_norm_eps
        __snake_case : Dict = image_size
        __snake_case : int = patch_size
        __snake_case : Dict = num_channels
        __snake_case : Tuple = qkv_bias
61
0
'''simple docstring''' from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase_ ) class a_ ( UpperCamelCase_ ): def __init__(self , **__a) -> Optional[Any]: """simple docstring""" super().__init__(**__a) requires_backends(self , 'vision') requires_backends(self , 'torch') if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(__a) def SCREAMING_SNAKE_CASE__ (self , **__a) -> int: """simple docstring""" __snake_case : List[Any] = {} __snake_case : List[str] = {} __snake_case : int = {} # preprocess args if "points_per_batch" in kwargs: __snake_case : Optional[int] = kwargs['points_per_batch'] if "points_per_crop" in kwargs: __snake_case : Dict = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: __snake_case : Dict = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: __snake_case : int = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: __snake_case : int = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: __snake_case : Union[str, Any] = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: __snake_case : str = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: __snake_case : Optional[int] = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: __snake_case : Dict = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: __snake_case : List[str] = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: __snake_case : Union[str, Any] = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: __snake_case : List[Any] = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self , __a , *__a , __a=None , __a=None , **__a) -> Dict: """simple docstring""" return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a=6_4 , __a = 0 , __a = 5_1_2 / 1_5_0_0 , __a = 3_2 , __a = 1 , ) -> List[Any]: """simple docstring""" __snake_case : Tuple = load_image(__a) __snake_case : List[Any] = self.image_processor.size['longest_edge'] __snake_case : List[Any] = self.image_processor.generate_crop_boxes( __a , __a , __a , __a , __a , __a) __snake_case : Union[str, Any] = self.image_processor(images=__a , return_tensors='pt') with self.device_placement(): if self.framework == "pt": __snake_case : List[str] = self.get_inference_context() with inference_context(): __snake_case : List[Any] = self._ensure_tensor_on_device(__a , device=self.device) __snake_case : Optional[int] = self.model.get_image_embeddings(model_inputs.pop('pixel_values')) __snake_case : str = image_embeddings __snake_case : Optional[Any] = grid_points.shape[1] __snake_case : Dict = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
' 'To return all points at once, set points_per_batch to None') for i in range(0 , __a , __a): __snake_case : Union[str, Any] = grid_points[:, i : i + points_per_batch, :, :] __snake_case : Any = input_labels[:, i : i + points_per_batch] __snake_case : Any = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE__ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = model_inputs.pop('input_boxes') __snake_case : str = model_inputs.pop('is_last') __snake_case : int = model_inputs.pop('original_sizes').tolist() __snake_case : str = model_inputs.pop('reshaped_input_sizes').tolist() __snake_case : Tuple = self.model(**__a) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __snake_case : str = model_outputs['pred_masks'] __snake_case : Any = self.image_processor.post_process_masks( __a , __a , __a , __a , binarize=__a) __snake_case : Optional[Any] = model_outputs['iou_scores'] __snake_case : Optional[int] = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE__ (self , __a , __a=False , __a=False , __a=0.7 , ) -> List[str]: """simple docstring""" __snake_case : int = [] __snake_case : Optional[Any] = [] __snake_case : int = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores')) all_masks.extend(model_output.pop('masks')) all_boxes.append(model_output.pop('boxes')) __snake_case : Union[str, Any] = torch.cat(__a) __snake_case : Tuple = torch.cat(__a) __snake_case : List[str] = self.image_processor.post_process_for_mask_generation( __a , __a , __a , __a) __snake_case : Any = defaultdict(__a) for output in model_outputs: for k, v in output.items(): extra[k].append(__a) __snake_case : List[str] = {} if output_rle_mask: __snake_case : Optional[int] = rle_mask if output_bboxes_mask: __snake_case : int = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
702
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( A : float , A : list[float] ) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    __snake_case : List[str] = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A )
    )
    return round(A , ndigits=2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
61
0
'''simple docstring''' import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _SCREAMING_SNAKE_CASE ( A : Tuple=32 , A : Optional[int]=10 , A : Optional[int]=1_00 , A : List[str]=10_26 , A : List[str]=True , A : List[str]="data/tokenized_stories_train_wikitext103.jbl" , A : List[Any]="igf_context_pairs.jbl" , ) -> Optional[Any]: """simple docstring""" set_seed(3 ) # generate train_data and objective_set __snake_case : str = generate_datasets( A , A , number=A , min_len=10_26 , trim=A ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __snake_case : int = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' ) # load pretrained model __snake_case : str = load_gpta('gpt2' ).to(A ) print('computing perplexity on objective set' ) __snake_case : List[str] = compute_perplexity(A , A , A ).item() print('perplexity on objective set:' , A ) # collect igf pairs and save to file demo.jbl collect_objective_set(A , A , A , A , A , A , A , A ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Optional[int]=15 , A : List[Any]=1_28 , A : Tuple=1_00 , A : Tuple="igf_model.pt" , ) -> Dict: """simple docstring""" set_seed(42 ) # Load pre-trained model __snake_case : Dict = GPTaLMHeadModel.from_pretrained('gpt2' ) # Initialize secondary learner to use embedding weights of model __snake_case : List[Any] = SecondaryLearner(A ) # Train secondary learner __snake_case : str = train_secondary_learner( A , A , max_epochs=A , batch_size=A , eval_freq=1_00 , igf_model_path=A , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : List[str] , A : Dict , A : Any=32 , A : Dict=10_00 , A : List[Any]=16 , A : Dict=1.0 , A : Union[str, Any]=recopy_gpta , A : int=None , A : Optional[int]=10 , A : Tuple="gpt2_finetuned.pt" , ) -> Dict: """simple docstring""" __snake_case : Optional[int] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' ) __snake_case : Any = RandomSampler(A ) __snake_case : List[Any] = DataLoader(A , sampler=A ) __snake_case : int = max_steps // (len(A )) + 1 __snake_case : Union[str, Any] = 0 __snake_case : List[str] = torch.zeros((1, context_len) , dtype=torch.long , device=A ) __snake_case : str = recopy_model(A , A , A ) model.train() if secondary_learner is not None: secondary_learner.to(A ) secondary_learner.eval() __snake_case : Dict = [] __snake_case : Dict = 0 __snake_case : Any = [] __snake_case : Optional[int] = [] # Compute the performance of the transformer model at the beginning __snake_case : List[str] = compute_perplexity(A , A , A ) test_perps.append(A ) print('Test perplexity, step' , A , ':' , A ) for epoch in range(int(A ) ): for step, example in enumerate(A ): torch.cuda.empty_cache() __snake_case : Optional[int] = random.randint(0 , example.size(2 ) - context_len - 1 ) __snake_case : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __snake_case : Any = model(A , labels=A ) __snake_case : Tuple = True if secondary_learner is not None: __snake_case 
: Optional[int] = secondary_learner.forward( torch.tensor(A , dtype=torch.long , device=A ).unsqueeze(0 ) )[0].item() observed_qs.append(float(A ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: __snake_case : Dict = -1 if predicted_q < threshold: __snake_case : Tuple = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __snake_case : Optional[int] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __snake_case : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __snake_case : Tuple = compute_perplexity(A , A , A ) test_perps.append(A ) print('Test perplexity, step' , A , ':' , A ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , A ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _SCREAMING_SNAKE_CASE ( ) -> Dict: """simple docstring""" __snake_case : str = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' ) # Required parameters parser.add_argument( '--data_dir' , default=A , type=A , required=A , help='The input data dir. Should contain data files for WikiText.' , ) parser.add_argument( '--model_name_or_path' , default=A , type=A , required=A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--data_file' , type=A , default=A , help=( 'A jbl file containing tokenized data which can be split as objective dataset, ' 'train_dataset and test_dataset.' ) , ) parser.add_argument( '--igf_data_file' , type=A , default=A , help='A jbl file containing the context and information gain pairs to train secondary learner.' , ) parser.add_argument( '--output_dir' , default=A , type=A , required=A , help='The output directory where the final fine-tuned model is stored.' , ) parser.add_argument( '--tokenizer_name' , default=A , type=A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument('--seed' , type=A , default=A , help='A seed for reproducible training.' ) parser.add_argument( '--context_len' , default=32 , type=A , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) , ) parser.add_argument( '--size_objective_set' , default=1_00 , type=A , help='number of articles that are long enough to be used as our objective set' , ) parser.add_argument( '--eval_freq' , default=1_00 , type=A , help='secondary model evaluation is triggered at eval_freq' ) parser.add_argument('--max_steps' , default=10_00 , type=A , help='To calculate training epochs' ) parser.add_argument( '--secondary_learner_batch_size' , default=1_28 , type=A , help='batch size of training data for secondary learner' , ) parser.add_argument( '--batch_size' , default=16 , type=A , help='batch size of training data of language model(gpt2) ' ) parser.add_argument( '--eval_interval' , default=10 , type=A , help=( 'decay the selectivity of our secondary learner filter from' '1 standard deviation above average to 1 below average after 10 batches' ) , ) parser.add_argument( '--number' , default=1_00 , type=A , help='The number of examples split to be used as objective_set/test_data' ) parser.add_argument( '--min_len' , default=10_26 , type=A , help='The minimum length of the article to be used as objective set' ) parser.add_argument( '--secondary_learner_max_epochs' , default=15 , type=A , help='number of epochs to train secondary learner' ) parser.add_argument('--trim' , default=A , type=A , help='truncate the example if it exceeds context length' ) parser.add_argument( '--threshold' , default=1.0 , type=A , help=( 'The threshold value used by secondary learner to filter the train_data and allow only' ' informative data as input to the model' ) , ) parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=A , help='finetuned_model_name' ) parser.add_argument( '--recopy_model' , default=A , type=A , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=A , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , ) # Load train data for secondary learner __snake_case : Tuple = joblib.load('data/IGF_values.jbl' ) # Train secondary learner __snake_case : Tuple = training_secondary_learner( A , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='igf_model.pt' , ) # load pretrained gpt2 model __snake_case : List[Any] = GPTaLMHeadModel.from_pretrained('gpt2' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __snake_case : Dict = generate_datasets( context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=1_00 , min_len=10_26 , trim=A ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( A , A , A , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=A , secondary_learner=A , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , ) if __name__ == "__main__": main()
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, 
globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
704
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__A = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)


def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str:
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(A )


def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]:
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    __snake_case : Any = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(A , id=A )
61
0
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = '''https://openaipublic.azureedge.net/jukebox/models/''' __A = { '''jukebox-1b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''1b_lyrics/prior_level_2.pth.tar''', ], '''jukebox-5b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''5b_lyrics/prior_level_2.pth.tar''', ], } def _SCREAMING_SNAKE_CASE ( A : Dict ) -> Optional[Any]: """simple docstring""" if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10: __snake_case : Union[str, Any] = key.replace('.model.1.bias' , '.conv1d_1.bias' ) elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10: __snake_case : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' ) elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10: __snake_case : Optional[Any] = key.replace('.model.3.bias' , '.conv1d_2.bias' ) elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10: __snake_case : Dict = key.replace('.model.3.weight' , '.conv1d_2.weight' ) if "conditioner_blocks.0." in key: __snake_case : Optional[int] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' ) if "prime_prior" in key: __snake_case : int = key.replace('prime_prior' , 'encoder' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: __snake_case : Optional[Any] = key.replace('.emb.' , '.' ) if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('.k' , '.codebook' ) if "y_emb." in key: return key.replace('y_emb.' , 'metadata_embedding.' ) if "x_emb.emb." 
in key: __snake_case : int = key.replace('0.x_emb.emb' , 'embed_tokens' ) if "prime_state_ln" in key: return key.replace('prime_state_ln' , 'encoder.final_layer_norm' ) if ".ln" in key: return key.replace('.ln' , '.layer_norm' ) if "_ln" in key: return key.replace('_ln' , '_layer_norm' ) if "prime_state_proj" in key: return key.replace('prime_state_proj' , 'encoder.proj_in' ) if "prime_x_out" in key: return key.replace('prime_x_out' , 'encoder.lm_head' ) if "prior.x_out" in key: return key.replace('x_out' , 'fc_proj_out' ) if "x_emb" in key: return key.replace('x_emb' , 'embed_tokens' ) return key def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Tuple , A : int , A : Union[str, Any] ) -> List[Any]: """simple docstring""" __snake_case : str = {} import re __snake_case : Any = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) __snake_case : Union[str, Any] = re.compile( R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) __snake_case : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) __snake_case : Union[str, Any] = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) __snake_case : List[Any] = re.compile( R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) __snake_case : Any = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) __snake_case : Tuple = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' ) __snake_case : List[str] = re.compile( R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) __snake_case : str = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(A ): __snake_case : Optional[Any] = re_encoder_block_conv_in.match(A ) __snake_case : Optional[int] = regex_match.groups() __snake_case : Optional[int] = int(groups[2] ) * 2 + int(groups[3] ) __snake_case : Dict = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}""" __snake_case : Dict = re_encoder_block_conv_in.sub(A , A ) elif re_encoder_block_resnet.fullmatch(A ): __snake_case : str = re_encoder_block_resnet.match(A ) __snake_case : Tuple = regex_match.groups() __snake_case : List[str] = int(groups[2] ) * 2 + int(groups[3] ) __snake_case : Optional[Any] = {'1': 1, '3': 2}[groups[-2]] __snake_case : str = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.""" __snake_case : List[Any] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" __snake_case : Tuple = prefix + resnet_block __snake_case : Optional[Any] = re_encoder_block_resnet.sub(A , A ) elif re_encoder_block_proj_out.fullmatch(A ): __snake_case : Tuple = re_encoder_block_proj_out.match(A ) __snake_case : Optional[Any] = regex_match.groups() __snake_case : Optional[int] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}""" __snake_case : Union[str, Any] = re_encoder_block_proj_out.sub(A , A ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(A ): __snake_case : Dict = re_decoder_block_conv_out.match(A ) __snake_case : Union[str, Any] = regex_match.groups() __snake_case : Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2 __snake_case : List[str] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}""" __snake_case 
: str = re_decoder_block_conv_out.sub(A , A ) elif re_decoder_block_resnet.fullmatch(A ): __snake_case : Any = re_decoder_block_resnet.match(A ) __snake_case : int = regex_match.groups() __snake_case : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2 __snake_case : int = {'1': 1, '3': 2}[groups[-2]] __snake_case : Optional[int] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.""" __snake_case : Dict = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" __snake_case : Union[str, Any] = prefix + resnet_block __snake_case : Any = re_decoder_block_resnet.sub(A , A ) elif re_decoder_block_proj_in.fullmatch(A ): __snake_case : int = re_decoder_block_proj_in.match(A ) __snake_case : Optional[Any] = regex_match.groups() __snake_case : Optional[int] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}""" __snake_case : Optional[int] = re_decoder_block_proj_in.sub(A , A ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(A ): __snake_case : str = re_prior_cond_conv_out.match(A ) __snake_case : Optional[int] = regex_match.groups() __snake_case : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 __snake_case : Tuple = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}""" __snake_case : Tuple = re_prior_cond_conv_out.sub(A , A ) elif re_prior_cond_resnet.fullmatch(A ): __snake_case : Dict = re_prior_cond_resnet.match(A ) __snake_case : Optional[int] = regex_match.groups() __snake_case : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 __snake_case : Any = {'1': 1, '3': 2}[groups[-2]] __snake_case : Any = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.""" __snake_case : List[str] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" __snake_case : int = prefix + resnet_block __snake_case : List[str] = re_prior_cond_resnet.sub(A , A ) elif re_prior_cond_proj_in.fullmatch(A ): __snake_case : Optional[Any] = re_prior_cond_proj_in.match(A ) __snake_case : str = regex_match.groups() __snake_case : Optional[Any] = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}""" __snake_case : Tuple = re_prior_cond_proj_in.sub(A , A ) # keep original key else: __snake_case : int = original_key __snake_case : Optional[int] = replace_key(A ) if F"""{key_prefix}.{key}""" not in model_state_dict or key is None: print(F"""failed converting {original_key} to {key}, does not match""" ) # handle missmatched shape elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape: __snake_case : str = model_state_dict[F"""{key_prefix}.{key}"""] print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" ) __snake_case : List[str] = original_key __snake_case : List[Any] = original_key __snake_case : Union[str, Any] = value return new_dict @torch.no_grad() def _SCREAMING_SNAKE_CASE ( A : str=None , A : int=None ) -> str: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ): __snake_case : Union[str, Any] = requests.get(F"""{PREFIX}{file}""" , allow_redirects=A ) os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=A ) open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , 'wb' ).write(r.content ) __snake_case : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]] __snake_case : Any = JukeboxConfig.from_pretrained(A ) __snake_case : Optional[Any] = JukeboxModel(A ) __snake_case : Optional[int] = [] 
__snake_case : Union[str, Any] = {} for i, dict_name in enumerate(A ): __snake_case : List[Any] = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['model'] __snake_case : List[Any] = {} for k in old_dic.keys(): if k.endswith('.b' ): __snake_case : Any = old_dic[k] elif k.endswith('.w' ): __snake_case : List[Any] = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: __snake_case : str = old_dic[k] else: __snake_case : List[Any] = old_dic[k] __snake_case : Union[str, Any] = 'vqvae' if i == 0 else F"""priors.{3 - i}""" __snake_case : Any = fix_jukebox_keys(A , model.state_dict() , A , A ) weight_dict.append(A ) __snake_case : Any = weight_dict.pop(0 ) model.vqvae.load_state_dict(A ) for i in range(len(A ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(A ).mkdir(exist_ok=A ) with open(F"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile: json.dump(A , A ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) return weight_dict if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''jukebox-5b-lyrics''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''jukebox-5b-lyrics-converted''', type=str, help='''Path to the output PyTorch model directory.''', ) __A = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
705
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a_ : def __init__(self , __a , __a=1_3 , __a=3_0 , __a=2 , __a=3 , __a=True , __a=True , __a=3_2 , __a=2 , __a=4 , __a=3_7 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1_0 , __a=0.02 , __a=3 , __a=None , __a=2 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[str] = patch_size __snake_case : Tuple = num_channels __snake_case : List[str] = is_training __snake_case : Tuple = use_labels __snake_case : Dict = hidden_size __snake_case : Dict = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Any = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[int] = scope __snake_case : List[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __snake_case : Optional[Any] = (image_size // patch_size) ** 2 __snake_case : int = num_patches + 2 def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : str = None if self.use_labels: __snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : List[str] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> List[Any]: """simple docstring""" __snake_case : Any = TFDeiTModel(config=__a) __snake_case : Tuple = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> int: """simple docstring""" __snake_case : Optional[Any] = TFDeiTForMaskedImageModeling(config=__a) __snake_case : int = model(__a) self.parent.assertEqual( result.reconstruction.shape , 
(self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __snake_case : Optional[int] = 1 __snake_case : Any = TFDeiTForMaskedImageModeling(__a) __snake_case : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : Any = model(__a) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> List[str]: """simple docstring""" __snake_case : Any = self.type_sequence_label_size __snake_case : Dict = TFDeiTForImageClassification(__a) __snake_case : Union[str, Any] = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : Dict = 1 __snake_case : List[Any] = TFDeiTForImageClassification(__a) __snake_case : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : Dict = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() __snake_case : Tuple = config_and_inputs __snake_case : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Any = TFDeiTModelTester(self) __snake_case : Tuple = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[Any] = model_class(__a) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) __snake_case : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , tf.keras.layers.Dense)) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = model_class(__a) __snake_case : str = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case 
: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> List[str]: """simple docstring""" __snake_case : List[str] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : List[str] = TFDeiTModel.from_pretrained(__a) self.assertIsNotNone(__a) def _SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" __snake_case : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') __snake_case : List[str] = self.default_image_processor __snake_case : Any = prepare_img() __snake_case : List[str] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Tuple = model(**__a) # verify the logits __snake_case : List[Any] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-1.0_266, 0.1_912, -1.2_861]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
706
'''simple docstring'''
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> int:
    """simple docstring"""
    if not is_accelerate_available():
        return method
    __snake_case : Optional[Any] = version.parse(accelerate.__version__ ).base_version
    if version.parse(A ) < version.parse('0.17.0' ):
        return method

    def wrapper(self : Optional[Any] , *A : Optional[Any] , **A : Optional[int] ):
        if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
            self._hf_hook.pre_forward(self )
        return method(self , *A , **A )

    return wrapper
61
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): def __init__(self , *__a , **__a) -> None: """simple docstring""" warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , __a , ) super().__init__(*__a , **__a)
707
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a_ ( unittest.TestCase , UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = load_tool('text-to-speech') self.tool.setup() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Dict = self.tool('hey') __snake_case : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = self.tool('hey') __snake_case : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
61
0
'''simple docstring''' import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = OpenAIGPTTokenizer _snake_case = OpenAIGPTTokenizerFast _snake_case = True _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : Dict = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __snake_case : Optional[int] = dict(zip(__a , range(len(__a)))) __snake_case : Tuple = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', ''] __snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w') as fp: fp.write(json.dumps(__a)) with open(self.merges_file , 'w') as fp: fp.write('\n'.join(__a)) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" return "lower newer", "lower newer" def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : str = OpenAIGPTTokenizer(self.vocab_file , self.merges_file) __snake_case : Dict = 'lower' __snake_case : List[Any] = ['low', 'er</w>'] __snake_case : Dict = tokenizer.tokenize(__a) self.assertListEqual(__a , __a) __snake_case : int = tokens + ['<unk>'] __snake_case : int = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , __a) def SCREAMING_SNAKE_CASE__ (self , __a=1_5) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(__a , **__a) # Simple input __snake_case : Optional[Any] = 'This is a simple input' __snake_case : Optional[Any] = ['This is a simple input 1', 'This is a simple input 2'] __snake_case : Union[str, Any] = ('This is a simple input', 'This is a pair') __snake_case : Dict = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length') # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length') # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length') # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length') # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" pass @require_ftfy @require_spacy @require_tokenizers class a_ ( UpperCamelCase_ ): pass
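# Editor's illustrative sketch (not the tokenizer's own code): how the toy merge table in the
# test above turns "lower" into ['low', 'er</w>']. This is a simplified greedy BPE that merges
# one highest-priority pair at a time, which is enough to reproduce the test's expectation.
def toy_bpe(word, merges):
    # merges: list of (left, right) pairs; earlier entries have higher priority
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {pair: i for i, pair in enumerate(merges)}
    while True:
        pairs = [(ranks.get((a, b), float("inf")), i)
                 for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        best_rank, best_i = min(pairs, default=(float("inf"), -1))
        if best_rank == float("inf"):
            break  # no mergeable pair left
        symbols[best_i:best_i + 2] = [symbols[best_i] + symbols[best_i + 1]]
    return symbols

print(toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]))  # ['low', 'er</w>']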
708
'''simple docstring''' import math class a_ : def __init__(self , __a=0) -> Any: # a graph with Node 0,1,...,N-1 """simple docstring""" __snake_case : List[str] = n __snake_case : Tuple = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # adjacency matrix for weight __snake_case : Union[str, Any] = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = w def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): __snake_case : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" return self.dp[u][v] if __name__ == "__main__": __A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
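# Editor's illustrative sketch: a stand-alone Floyd-Warshall used to sanity-check the record
# above on the same edge list. Note the record never sets dp[i][i] = 0, so its self-distances
# stay at infinity; the sketch initialises the diagonal explicitly.
import math

def floyd_warshall(n, edges):
    dist = [[math.inf] * n for _ in range(n)]
    for i in range(n):
        dist[i][i] = 0
    for u, v, w in edges:
        dist[u][v] = w
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

edges = [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10), (3, 1, 2),
         (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]
dist = floyd_warshall(5, edges)
print(dist[1][4])  # 11 (1 -> 3 -> 4)
print(dist[0][3])  # 16 (0 -> 2 -> 3)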
61
0
'''simple docstring''' import numpy as np import qiskit def _SCREAMING_SNAKE_CASE ( A : int = 8 , A : int | None = None ) -> str: """simple docstring""" __snake_case : str = np.random.default_rng(seed=A ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. __snake_case : Optional[int] = 6 * key_len # Measurement basis for Alice's qubits. __snake_case : str = rng.integers(2 , size=A ) # The set of states Alice will prepare. __snake_case : Dict = rng.integers(2 , size=A ) # Measurement basis for Bob's qubits. __snake_case : Tuple = rng.integers(2 , size=A ) # Quantum Circuit to simulate BB84 __snake_case : List[str] = qiskit.QuantumCircuit(A , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(A ): if alice_state[index] == 1: bbaa_circ.x(A ) if alice_basis[index] == 1: bbaa_circ.h(A ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(A ): if bob_basis[index] == 1: bbaa_circ.h(A ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. __snake_case : List[str] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. __snake_case : List[Any] = qiskit.execute(A , A , shots=1 , seed_simulator=A ) # Returns the result of measurement. __snake_case : List[Any] = job.result().get_counts(A ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. __snake_case : Optional[Any] = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( A , A , A ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. __snake_case : Any = gen_key[:key_len] if len(A ) >= key_len else gen_key.ljust(A , '0' ) return key if __name__ == "__main__": print(f'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
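# Editor's illustrative sketch (no qiskit needed): the classical "sifting" step that the BB84
# record above performs after the circuit runs. On an ideal, eavesdropper-free channel, whenever
# Alice and Bob pick the same basis Bob's measurement reproduces Alice's bit, so roughly half of
# the positions survive into the shared key.
import numpy as np

rng = np.random.default_rng(seed=0)
n = 48
alice_basis = rng.integers(2, size=n)
alice_bits = rng.integers(2, size=n)
bob_basis = rng.integers(2, size=n)

sifted = [int(bit) for a, b, bit in zip(alice_basis, bob_basis, alice_bits) if a == b]
key = "".join(str(b) for b in sifted)[:8].ljust(8, "0")  # pad/truncate like the record does
print(key)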
709
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
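# Editor's illustrative sketch: the crop_pct arithmetic used in the resize branch above. For
# target sizes below 384 the image is first resized so its short side equals
# shortest_edge / crop_pct and then centre-cropped back down; at 384 and above it is warped
# directly with no crop.
shortest_edge = 224
crop_pct = 224 / 256                       # the default set in __init__
resize_to = int(shortest_edge / crop_pct)
print(resize_to)                           # 256 -> resize short side to 256, then centre-crop to 224x224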
61
0
def _SCREAMING_SNAKE_CASE ( A : Any ) -> int: """simple docstring""" __snake_case : Any = [0] * len(A ) __snake_case : List[str] = [] __snake_case : List[str] = [1] * len(A ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(A ) ): if indegree[i] == 0: queue.append(A ) while queue: __snake_case : Optional[Any] = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: __snake_case : int = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(A ) print(max(A ) ) # Adjacency list of Graph __A = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
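# Editor's illustrative sketch: the record above runs Kahn's topological order and relaxes
# long_dist[x] = long_dist[v] + 1 along each edge, so long_dist counts vertices on the longest
# path. A compact memoised equivalent, checked against the same adjacency list:
from functools import lru_cache

graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}

@lru_cache(maxsize=None)
def longest_from(v):
    return 1 + max((longest_from(u) for u in graph[v]), default=0)

print(max(longest_from(v) for v in graph))  # 5, e.g. the path 0 -> 2 -> 5 -> 6 -> 7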
710
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
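# Editor's illustrative sketch: the cached recursive factorial above is limited by Python's
# recursion depth for large inputs; an iterative form (or math.factorial) sidesteps that.
import math

def factorial_iter(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    result = 1
    for i in range(2, num + 1):
        result *= i
    return result

assert factorial_iter(10) == math.factorial(10) == 3_628_800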
61
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __A = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
711
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
0
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = XLNetTokenizer _snake_case = XLNetTokenizerFast _snake_case = True _snake_case = True def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __snake_case : int = XLNetTokenizer(__a , keep_accents=__a) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = '<s>' __snake_case : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a) , __a) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a) , __a) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<unk>') self.assertEqual(vocab_keys[1] , '<s>') self.assertEqual(vocab_keys[-1] , '<eod>') self.assertEqual(len(__a) , 1_0_0_6) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : Dict = XLNetTokenizer(__a , keep_accents=__a) __snake_case : List[Any] = tokenizer.tokenize('This is a test') self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]) __snake_case : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( __a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __snake_case : Tuple = tokenizer.convert_tokens_to_ids(__a) self.assertListEqual(__a , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4]) __snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(__a) self.assertListEqual( __a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = XLNetTokenizer(__a , do_lower_case=__a) __snake_case : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( __a , [ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o']) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: 
"""simple docstring""" __snake_case : int = XLNetTokenizer(__a , do_lower_case=__a) __snake_case : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( __a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = XLNetTokenizer.from_pretrained('xlnet-base-cased') __snake_case : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=__a) __snake_case : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a) __snake_case : Dict = tokenizer.build_inputs_with_special_tokens(__a) __snake_case : Dict = tokenizer.build_inputs_with_special_tokens(__a , __a) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
712
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = 
align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
0
'''simple docstring''' from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __A = 1.054_571_817E-34 # unit of ℏ : J * s __A = 3E8 # unit of c : m * s^-1 def _SCREAMING_SNAKE_CASE ( A : float , A : float , A : float ) -> dict[str, float]: """simple docstring""" if (force, area, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if force < 0: raise ValueError('Magnitude of force can not be negative' ) if distance < 0: raise ValueError('Distance can not be negative' ) if area < 0: raise ValueError('Area can not be negative' ) if force == 0: __snake_case : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_40 * (distance) ** 4 ) return {"force": force} elif area == 0: __snake_case : Tuple = (2_40 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __snake_case : List[str] = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('One and only one argument must be 0' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
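# Editor's illustrative sketch: the force branch of the record above worked out by hand,
# force = (hbar * c * pi**2 * area) / (240 * distance**4). For 1 cm^2 plates held 1 um apart
# the attraction comes out around 1.3e-7 N, the textbook order of magnitude.
from math import pi

REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # J * s
SPEED_OF_LIGHT = 3e8                          # m / s

area, distance = 1e-4, 1e-6                   # m^2, m
force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * distance**4)
print(f"{force:.3e} N")                       # ~1.301e-07 N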
713
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
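# Editor's illustrative sketch: a quick randomized check that the odd-even (brick) sort above
# agrees with Python's built-in sort; the two alternating passes are folded into one loop here.
import random

def odd_even_sort(values):
    values = list(values)
    done = False
    while not done:
        done = True
        for start in (0, 1):  # even-index pass, then odd-index pass
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    done = False
    return values

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert odd_even_sort(data) == sorted(data)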
61
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class a_ ( UpperCamelCase_ ): _snake_case = """data2vec-vision""" def __init__(self , __a=7_6_8 , __a=1_2 , __a=1_2 , __a=3_0_7_2 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-12 , __a=2_2_4 , __a=1_6 , __a=3 , __a=False , __a=False , __a=False , __a=False , __a=0.1 , __a=0.1 , __a=True , __a=[3, 5, 7, 1_1] , __a=[1, 2, 3, 6] , __a=True , __a=0.4 , __a=2_5_6 , __a=1 , __a=False , __a=2_5_5 , **__a , ) -> Dict: """simple docstring""" super().__init__(**__a) __snake_case : Dict = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Any = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : str = initializer_range __snake_case : Optional[int] = layer_norm_eps __snake_case : List[str] = image_size __snake_case : Tuple = patch_size __snake_case : str = num_channels __snake_case : Tuple = use_mask_token __snake_case : int = use_absolute_position_embeddings __snake_case : Dict = use_relative_position_bias __snake_case : int = use_shared_relative_position_bias __snake_case : Optional[Any] = layer_scale_init_value __snake_case : int = drop_path_rate __snake_case : Union[str, Any] = use_mean_pooling # decode head attributes (semantic segmentation) __snake_case : int = out_indices __snake_case : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) __snake_case : Union[str, Any] = use_auxiliary_head __snake_case : int = auxiliary_loss_weight __snake_case : Tuple = auxiliary_channels __snake_case : Union[str, Any] = auxiliary_num_convs __snake_case : Optional[int] = auxiliary_concat_input __snake_case : Tuple = semantic_loss_ignore_index class a_ ( UpperCamelCase_ ): _snake_case = version.parse("""1.11""" ) @property def SCREAMING_SNAKE_CASE__ (self) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" return 1E-4
714
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger() def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A ) else: __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A ) if hidden_sizes == 1_92: __snake_case : int = timm.create_model('levit_192' , pretrained=A ) if hidden_sizes == 2_56: __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A ) if hidden_sizes == 3_84: __snake_case : int = timm.create_model('levit_384' , pretrained=A ) from_model.eval() __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval() __snake_case : int = OrderedDict() __snake_case : Optional[Any] = from_model.state_dict() __snake_case : Tuple = list(from_model.state_dict().keys() ) __snake_case : List[str] = list(our_model.state_dict().keys() ) print(len(A ) , len(A ) ) for i in range(len(A ) ): __snake_case : Optional[int] = weights[og_keys[i]] our_model.load_state_dict(A ) __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) ) __snake_case : Union[str, Any] = from_model(A ) __snake_case : List[str] = our_model(A ).logits assert torch.allclose(A , A ), "The model logits don't match the original one." __snake_case : int = name print(A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __snake_case : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = 'imagenet-1k-id2label.json' __snake_case : Tuple = 10_00 __snake_case : Dict = (1, num_labels) __snake_case : List[str] = 'huggingface/label-files' __snake_case : Any = num_labels __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A ) __snake_case : Dict = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } __snake_case : Union[str, Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( 
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A ) return config, expected_shape if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __A = parser.parse_args() __A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
0
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '''\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", author = "Lin, Chin-Yew and Och, Franz Josef", booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics", month = "aug 23{--}aug 27", year = "2004", address = "Geneva, Switzerland", publisher = "COLING", url = "https://www.aclweb.org/anthology/C04-1072", pages = "501--507", } ''' __A = '''\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation, the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. ''' __A = ''' Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: \'bleu\': bleu score, \'precisions\': geometric mean of n-gram precisions, \'brevity_penalty\': brevity penalty, \'length_ratio\': ratio of lengths, \'translation_length\': translation_length, \'reference_length\': reference_length Examples: >>> predictions = [ ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample ... ] >>> references = [ ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references) ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric("bleu") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results["bleu"]) 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=4 , __a=False) -> Any: """simple docstring""" __snake_case : Dict = compute_bleu( reference_corpus=__a , translation_corpus=__a , max_order=__a , smooth=__a) (__snake_case) : Dict = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
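# Editor's illustrative sketch: the two ingredients the BLEU metric above combines -- clipped
# n-gram precision and the brevity penalty -- computed by hand for a toy unigram case (the real
# score takes a geometric mean over n-gram orders up to max_order).
import math
from collections import Counter

prediction = ["the", "the", "the", "cat"]
reference = ["the", "cat", "sat"]

pred_counts = Counter(prediction)
ref_counts = Counter(reference)
clipped = sum(min(c, ref_counts[tok]) for tok, c in pred_counts.items())
precision = clipped / len(prediction)   # (1 "the" + 1 "cat") / 4 = 0.5
bp = 1.0 if len(prediction) > len(reference) else math.exp(1 - len(reference) / len(prediction))
print(precision, bp)                    # 0.5 1.0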
715
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride __snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: 
"""simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) 
self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.assertIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else
outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case : Any = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : 
Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case : str = trainer.predict(A ) __snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" main() if __name__ == "__main__": main()
716
'''simple docstring''' __A = {str(digit): digit**5 for digit in range(1_0)} def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" return sum( number for number in range(10_00 , 1_00_00_00 ) if number == digits_fifth_powers_sum(A ) ) if __name__ == "__main__": print(solution())
61
0
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = LEDTokenizer _snake_case = LEDTokenizerFast _snake_case = True def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" super().setUp() __snake_case : Optional[int] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __snake_case : Any = dict(zip(__a , range(len(__a)))) __snake_case : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __snake_case : int = {'unk_token': '<unk>'} __snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(__a) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(__a)) def SCREAMING_SNAKE_CASE__ (self , **__a) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a) def SCREAMING_SNAKE_CASE__ (self , **__a) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" return LEDTokenizer.from_pretrained('allenai/led-base-16384') @cached_property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return LEDTokenizerFast.from_pretrained('allenai/led-base-16384') @require_torch def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : int = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __snake_case : Any = tokenizer(__a , max_length=len(__a) , padding=__a , return_tensors='pt') self.assertIsInstance(__a , __a) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) __snake_case : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__a , __a) @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __snake_case : Tuple = tokenizer(__a , padding=__a , return_tensors='pt') self.assertIn('input_ids' , __a) self.assertIn('attention_mask' , __a) self.assertNotIn('labels' , __a) self.assertNotIn('decoder_attention_mask' , __a) @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : str = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: 
__snake_case : Any = tokenizer(text_target=__a , max_length=3_2 , padding='max_length' , return_tensors='pt') self.assertEqual(3_2 , targets['input_ids'].shape[1]) @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __snake_case : Optional[Any] = tokenizer( ['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=__a , truncation=__a , return_tensors='pt') self.assertIsInstance(__a , __a) self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2)) @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : int = ['A long paragraph for summarization.'] __snake_case : List[str] = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __snake_case : str = tokenizer(__a , return_tensors='pt') __snake_case : Union[str, Any] = tokenizer(text_target=__a , return_tensors='pt') __snake_case : Any = inputs['input_ids'] __snake_case : List[Any] = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) @require_torch def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __snake_case : List[Any] = ['Summary of the text.', 'Another summary.'] __snake_case : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __snake_case : List[str] = tokenizer(__a , padding=__a) __snake_case : Optional[Any] = [[0] * len(__a) for x in encoded_output['input_ids']] __snake_case : Dict = tokenizer.pad(__a) self.assertSequenceEqual(outputs['global_attention_mask'] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a) __snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a) __snake_case : List[str] = 'A, <mask> AllenNLP sentence.' __snake_case : int = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a) __snake_case : Optional[int] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a) self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids'])) self.assertEqual( sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , ) __snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids']) __snake_case : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids']) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2]) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2]) self.assertSequenceEqual( __a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']) self.assertSequenceEqual( __a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
717
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a_ : _snake_case = 42 _snake_case = None _snake_case = None def _SCREAMING_SNAKE_CASE ( ) -> Node | None: """simple docstring""" __snake_case : str = Node(1 ) __snake_case : Tuple = Node(2 ) __snake_case : Optional[int] = Node(3 ) __snake_case : List[str] = Node(4 ) __snake_case : List[str] = Node(5 ) return tree def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] if root is None: return output __snake_case : Optional[int] = deque([root] ) while process_queue: __snake_case : List[str] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] __snake_case : list[Sequence[Node | None]] = [] __snake_case : List[Any] = 0 __snake_case : int = height(A ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(A , A ) ) __snake_case : int = 1 else: output.append(get_nodes_from_right_to_left(A , A ) ) __snake_case : Tuple = 0 return output def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing. """simple docstring""" __snake_case : Optional[int] = make_tree() print(F"""In-order Traversal: {inorder(A )}""" ) print(F"""Pre-order Traversal: {preorder(A )}""" ) print(F"""Post-order Traversal: {postorder(A )}""" , '\n' ) print(F"""Height of Tree: {height(A )}""" , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(A ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(A ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(A , level=A ) ) print('\nZigZag order Traversal: ' ) print(zigzag(A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
61
0
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class a_ ( UpperCamelCase_ ): def __init__(self , __a = None , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = path_or_paths __snake_case : Tuple = split if split or isinstance(__a , __a) else 'train' __snake_case : List[str] = features __snake_case : List[Any] = cache_dir __snake_case : Any = keep_in_memory __snake_case : Optional[Any] = streaming __snake_case : Optional[Any] = num_proc __snake_case : Tuple = kwargs @abstractmethod def SCREAMING_SNAKE_CASE__ (self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: """simple docstring""" pass class a_ ( UpperCamelCase_ ): def __init__(self , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> Any: """simple docstring""" __snake_case : List[str] = features __snake_case : List[str] = cache_dir __snake_case : Any = keep_in_memory __snake_case : Optional[int] = streaming __snake_case : List[Any] = num_proc __snake_case : Dict = kwargs @abstractmethod def SCREAMING_SNAKE_CASE__ (self) -> Union[Dataset, IterableDataset]: """simple docstring""" pass
718
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int 
= w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] 
elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
61
0
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. __A = 1_0 def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : list[int] , A : int ) -> int: """simple docstring""" for i in range(A , A ): if array[i] == target: return i return -1 def _SCREAMING_SNAKE_CASE ( A : list[int] , A : int ) -> int: """simple docstring""" __snake_case : Any = 0 __snake_case : Any = len(A ) while left <= right: if right - left < precision: return lin_search(A , A , A , A ) __snake_case : Dict = (left + right) // 3 + 1 __snake_case : Optional[Any] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: __snake_case : Optional[int] = one_third - 1 elif array[two_third] < target: __snake_case : Tuple = two_third + 1 else: __snake_case : str = one_third + 1 __snake_case : Union[str, Any] = two_third - 1 else: return -1 def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : list[int] , A : int ) -> int: """simple docstring""" if left < right: if right - left < precision: return lin_search(A , A , A , A ) __snake_case : Union[str, Any] = (left + right) // 3 + 1 __snake_case : List[str] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(A , one_third - 1 , A , A ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , A , A , A ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , A , A ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() __A = input('''Enter numbers separated by comma:\n''').strip() __A = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), f"List must be ordered.\n{collection}." __A = int(input('''Enter the number to be found in the list:\n''').strip()) __A = ite_ternary_search(collection, target) __A = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f'''Iterative search: {target} found at positions: {resulta}''') print(f'''Recursive search: {target} found at positions: {resulta}''') else: print('''Not found''')
719
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification __A = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co __A = '''main''' # Default branch name __A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) __A = '''aaaaaaa''' # This commit does not exist, so we should 404. __A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes __A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" print('Bonjour!' ) yield print('Au revoir!' ) class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers') is not None class a_ ( unittest.TestCase ): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" with ContextManagers([]): print('Transformers are awesome!') # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" with ContextManagers([context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" with ContextManagers([context_fr(), context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n') @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_tf def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , 
['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_flax def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , [])
61
0
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def _SCREAMING_SNAKE_CASE ( A : str ) -> str: """simple docstring""" __snake_case : Optional[int] = tmp_path / 'file.csv' __snake_case : Optional[int] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def _SCREAMING_SNAKE_CASE ( A : List[str] ) -> Dict: """simple docstring""" __snake_case : Union[str, Any] = tmp_path / 'malformed_file.csv' __snake_case : Dict = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Union[str, Any] = tmp_path / 'csv_with_image.csv' __snake_case : Optional[int] = textwrap.dedent( F"""\ image {image_file} """ ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def _SCREAMING_SNAKE_CASE ( A : int ) -> str: """simple docstring""" __snake_case : int = tmp_path / 'csv_with_label.csv' __snake_case : Optional[int] = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : str = tmp_path / 'csv_with_int_list.csv' __snake_case : Optional[int] = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) def _SCREAMING_SNAKE_CASE ( A : List[str] , A : int , A : Tuple ) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = Csv() __snake_case : Dict = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(A , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(A ) in record.message for record in caplog.records ) @require_pil def _SCREAMING_SNAKE_CASE ( A : Dict ) -> Optional[int]: """simple docstring""" with open(A , encoding='utf-8' ) as f: __snake_case : int = f.read().splitlines()[1] __snake_case : List[Any] = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) __snake_case : List[Any] = csv._generate_tables([[csv_file_with_image]] ) __snake_case : int = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() __snake_case : Optional[Any] = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def _SCREAMING_SNAKE_CASE ( A : Dict ) -> List[Any]: """simple docstring""" with open(A , encoding='utf-8' ) as f: __snake_case : List[str] = f.read().splitlines()[1:] __snake_case : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) __snake_case : Union[str, Any] = csv._generate_tables([[csv_file_with_label]] ) __snake_case : Dict = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() __snake_case : List[Any] = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(A ) for label in labels] def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> int: """simple docstring""" __snake_case : str = Csv(encoding='utf-8' , sep=',' , 
converters={'int_list': lambda A : [int(A ) for i in x.split()]} ) __snake_case : Union[str, Any] = csv._generate_tables([[csv_file_with_int_list]] ) __snake_case : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) __snake_case : Any = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
720
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE ( A : list[int] , A : list[int] , A : int ) -> tuple[float, list[float]]: """simple docstring""" __snake_case : Any = list(range(len(A ) ) ) __snake_case : Optional[int] = [v / w for v, w in zip(A , A )] index.sort(key=lambda A : ratio[i] , reverse=A ) __snake_case : float = 0 __snake_case : list[float] = [0] * len(A ) for i in index: if weight[i] <= capacity: __snake_case : Union[str, Any] = 1 max_value += value[i] capacity -= weight[i] else: __snake_case : Optional[Any] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
721
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : str = 1 for i in range(1 , num + 1 ): fact *= i return fact def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : Union[str, Any] = 0 while number > 0: __snake_case : Dict = number % 10 sum_of_digits += last_digit __snake_case : Union[str, Any] = number // 10 # Removing the last_digit from the given number return sum_of_digits def _SCREAMING_SNAKE_CASE ( A : int = 1_00 ) -> int: """simple docstring""" __snake_case : List[Any] = factorial(A ) __snake_case : Dict = split_and_add(A ) return result if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
61
0
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = StableDiffusionControlNetImgaImgPipeline A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __A ( self: Optional[int] ) -> Optional[Any]: torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) _A = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) _A = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _A = CLIPTextModel(__A ) _A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _A = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __A ( self: Optional[Any] , __A: Tuple , __A: Optional[int]=0 ) -> str: if str(__A ).startswith('''mps''' ): _A = torch.manual_seed(__A ) else: _A = torch.Generator(device=__A ).manual_seed(__A ) _A = 2 _A = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) _A = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) _A = image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(__A ) 
).convert('''RGB''' ).resize((64, 64) ) _A = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __A ( self: List[Any] ) -> List[Any]: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __A ( self: str ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def __A ( self: str ) -> Optional[Any]: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = StableDiffusionControlNetImgaImgPipeline A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __A ( self: int ) -> str: torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__A: List[str] ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) _A = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) _A = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) _A = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _A = CLIPTextModel(__A ) _A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _A = MultiControlNetModel([controlneta, controlneta] ) _A = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __A ( self: Optional[int] , __A: List[Any] , __A: Optional[Any]=0 ) -> Optional[int]: if str(__A ).startswith('''mps''' ): _A = torch.manual_seed(__A ) else: _A = torch.Generator(device=__A ).manual_seed(__A ) _A = 2 _A = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * 
controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] _A = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) _A = image.cpu().permute(0 , 2 , 3 , 1 )[0] _A = Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ).resize((64, 64) ) _A = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __A ( self: Optional[int] ) -> int: _A = self.get_dummy_components() _A = self.pipeline_class(**__A ) pipe.to(__A ) _A = 10.0 _A = 4 _A = self.get_dummy_inputs(__A ) _A = steps _A = scale _A = pipe(**__A )[0] _A = self.get_dummy_inputs(__A ) _A = steps _A = scale _A = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] _A = self.get_dummy_inputs(__A ) _A = steps _A = scale _A = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] _A = self.get_dummy_inputs(__A ) _A = steps _A = scale _A = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def __A ( self: Union[str, Any] ) -> Any: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __A ( self: Optional[int] ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def __A ( self: Union[str, Any] ) -> int: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def __A ( self: str ) -> Optional[int]: _A = self.get_dummy_components() _A = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Union[str, Any] ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self: List[str] ) -> List[str]: _A = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) _A = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = '''evil space-punk bird''' _A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_12, 5_12) ) _A = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_12, 5_12) ) _A = pipe( __A , __A , control_image=__A , generator=__A , output_type='''np''' , num_inference_steps=50 , strength=0.6 , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) _A = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9e-2
62
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __A = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str: _A = question_encoder _A = generator _A = self.question_encoder def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict: if os.path.isfile(__A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , '''question_encoder_tokenizer''' ) _A = os.path.join(__A , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__A ) self.generator.save_pretrained(__A ) @classmethod def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any: # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _A = kwargs.pop('''config''' , __A ) if config is None: _A = RagConfig.from_pretrained(__A ) _A = AutoTokenizer.from_pretrained( __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) _A = AutoTokenizer.from_pretrained( __A , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__A , generator=__A ) def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int: return self.current_tokenizer(*__A , **__A ) def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict: return self.generator.batch_decode(*__A , **__A ) def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple: return self.generator.decode(*__A , **__A ) def __A ( self: Dict ) -> List[str]: _A = self.question_encoder def __A ( self: Union[str, Any] ) -> int: _A = self.generator def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding: warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , __A , ) if max_length is None: _A = self.current_tokenizer.model_max_length _A = self( __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _A = self.current_tokenizer.model_max_length _A = self( text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , ) _A = labels['''input_ids'''] return model_inputs
62
1
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __A = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } __A = { 'facebook/mbart-large-en-ro': 1024, 'facebook/mbart-large-cc25': 1024, } # fmt: off __A = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = ["input_ids", "attention_mask"] A_ = MBartTokenizer A_ = [] A_ = [] def __init__( self: str , __A: Optional[int]=None , __A: List[str]=None , __A: Optional[int]="<s>" , __A: Tuple="</s>" , __A: List[str]="</s>" , __A: Tuple="<s>" , __A: Dict="<unk>" , __A: Optional[Any]="<pad>" , __A: str="<mask>" , __A: List[Any]=None , __A: Any=None , __A: Union[str, Any]=None , **__A: int , ) -> str: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( vocab_file=__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , **__A , ) _A = vocab_file _A = False if not self.vocab_file else True _A = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) _A = { lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A = src_lang if src_lang is not None else '''en_XX''' _A = self.convert_tokens_to_ids(self._src_lang ) _A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __A ( self: Union[str, Any] ) -> str: return self._src_lang @src_lang.setter def __A ( self: List[Any] , __A: str ) -> None: _A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __A ( self: Union[str, Any] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __A ( self: str , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self: List[Any] , __A: List[Any] , __A: str , __A: Optional[str] , __A: Optional[str] , **__A: Any ) -> Any: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) _A = src_lang _A = self(__A , add_special_tokens=__A , return_tensors=__A , **__A ) _A = self.convert_tokens_to_ids(__A ) _A = tgt_lang_id return inputs def __A ( self: List[str] , __A: List[str] , __A: str = "en_XX" , __A: Optional[List[str]] = None , __A: str = "ro_RO" , **__A: int , ) -> BatchEncoding: _A = src_lang _A = tgt_lang return super().prepare_seqaseq_batch(__A , __A , **__A ) def __A ( self: List[Any] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def __A ( self: Optional[Any] ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __A ( self: Tuple , __A: List[Any] ) -> None: _A = self.convert_tokens_to_ids(__A ) _A = [] _A = [self.eos_token_id, self.cur_lang_code] _A = self.convert_ids_to_tokens(self.prefix_tokens ) _A = self.convert_ids_to_tokens(self.suffix_tokens ) _A = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __A ( self: Optional[int] , __A: str ) -> None: _A = self.convert_tokens_to_ids(__A ) _A = [] _A = [self.eos_token_id, self.cur_lang_code] _A = self.convert_ids_to_tokens(self.prefix_tokens ) _A = self.convert_ids_to_tokens(self.suffix_tokens ) _A = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __A ( self: List[str] , __A: str , __A: Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return _A = os.path.join( 
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
62
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (binary search)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence in O(n log n).

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] inside tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
def solution():
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000). The first Sunday of 1901
    was 6 January, so `day` starts at 6 and advances one week at a time."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
62
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": 
out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
62
1
import copy import re class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = "hp" A_ = {} A_ = None @classmethod def __A ( cls: Optional[Any] , __A: List[str] , __A: int ) -> Optional[Any]: _A = prefix _A = defaults cls.build_naming_info() @staticmethod def __A ( __A: Optional[Any] , __A: Union[str, Any] ) -> Union[str, Any]: if len(__A ) == 0: return "" _A = None if any(char.isdigit() for char in word ): raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__A ) + 1 ): _A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__A: Tuple ): _A = '''''' while integer != 0: _A = chr(ord('''A''' ) + integer % 10 ) + s integer //= 10 return s _A = 0 while True: _A = word + '''#''' + int_to_alphabetic(__A ) if sword in info["reverse_short_word"]: continue else: _A = sword break _A = short_word _A = word return short_word @staticmethod def __A ( __A: str , __A: Tuple ) -> List[Any]: _A = param_name.split('''_''' ) _A = [TrialShortNamer.shortname_for_word(__A , __A ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _A = ['''''', '''_'''] for separator in separators: _A = separator.join(__A ) if shortname not in info["reverse_short_param"]: _A = shortname _A = param_name return shortname return param_name @staticmethod def __A ( __A: Dict , __A: Union[str, Any] ) -> int: _A = TrialShortNamer.shortname_for_key(__A , __A ) _A = short_name _A = param_name @classmethod def __A ( cls: Tuple ) -> List[Any]: if cls.NAMING_INFO is not None: return _A = { '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } _A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__A , __A ) _A = info @classmethod def __A ( cls: List[str] , __A: int ) -> Optional[Any]: cls.build_naming_info() assert cls.PREFIX is not None _A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _A = cls.NAMING_INFO['''short_param'''][k] if isinstance(__A , __A ): _A = 1 if v else 0 _A = '''''' if isinstance(__A , (int, float) ) else '''-''' _A = f"""{key}{sep}{v}""" name.append(__A ) return "_".join(__A ) @classmethod def __A ( cls: Any , __A: int ) -> Optional[Any]: _A = repr[len(cls.PREFIX ) + 1 :] if repr == "": _A = [] else: _A = repr.split('''_''' ) _A = {} for value in values: if "-" in value: _A ,_A = value.split('''-''' ) else: _A = re.sub('''[0-9.]''' , '''''' , __A ) _A = float(re.sub('''[^0-9.]''' , '''''' , __A ) ) _A = cls.NAMING_INFO['''reverse_short_param'''][p_k] _A = p_v for k in cls.DEFAULTS: if k not in parameters: _A = cls.DEFAULTS[k] return parameters
62
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top 250 chart and return a {title: rating} mapping."""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
62
1
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Union[str, Any] , __A: int , __A: Any=13 , __A: Optional[int]=30 , __A: Union[str, Any]=2 , __A: str=3 , __A: Tuple=True , __A: Dict=True , __A: int=32 , __A: Optional[int]=5 , __A: Optional[int]=4 , __A: str=37 , __A: Any="gelu" , __A: Union[str, Any]=0.1 , __A: List[str]=0.1 , __A: Optional[int]=10 , __A: Tuple=0.02 , __A: str=3 , __A: Union[str, Any]=0.6 , __A: Dict=None , ) -> Any: _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range _A = mask_ratio _A = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _A = (image_size // patch_size) ** 2 _A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __A ( self: int ) -> Tuple: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def __A ( self: str ) -> str: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __A ( self: int , __A: List[str] , __A: List[str] , __A: List[str] ) -> List[Any]: _A = ViTMAEModel(config=__A ) model.to(__A ) model.eval() _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self: str , __A: Tuple , __A: Dict , __A: int ) -> Optional[int]: _A = ViTMAEForPreTraining(__A ) model.to(__A ) model.eval() _A = model(__A ) _A = (self.image_size // self.patch_size) ** 2 _A = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _A = 1 _A = ViTMAEForPreTraining(__A ) model.to(__A ) model.eval() _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(__A ) _A = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, 
expected_num_channels) ) def __A ( self: Any ) -> str: _A = self.prepare_config_and_inputs() _A ,_A ,_A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () A_ = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ViTMAEModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 ) def __A ( self: List[str] ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __A ( self: Optional[Any] ) -> int: pass def __A ( self: List[Any] ) -> Any: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def __A ( self: Optional[Any] ) -> Dict: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) def __A ( self: str ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __A ( self: Optional[Any] ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def __A ( self: Any , __A: List[Any] , __A: Tuple , __A: Optional[int] ) -> List[str]: # make masks reproducible np.random.seed(2 ) _A = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _A = torch.from_numpy(__A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _A = pt_noise super().check_pt_tf_models(__A , __A , __A ) def __A ( self: int ) -> Dict: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) model.to(__A ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _A = model(**self._prepare_for_class(__A , __A ) ) _A = outputs[0].cpu().numpy() _A = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A ) _A = model_class.from_pretrained(__A ) model.to(__A ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _A = model(**self._prepare_for_class(__A , __A ) ) # Make sure we don't have nans _A = after_outputs[0].cpu().numpy() _A = 0 _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__A , 1e-5 ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __A ( self: Any ) -> int: pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.''' ) def __A ( self: Any ) -> List[str]: pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __A ( self: Union[str, Any] ) -> Optional[Any]: pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __A ( self: Optional[int] ) -> Any: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Optional[int]: pass @slow def __A ( self: Union[str, Any] ) -> Dict: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = ViTMAEModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Any: return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __A ( self: int ) -> Union[str, Any]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) _A = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ).to(__A ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _A = ViTMAEConfig() _A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _A = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): _A = model(**__A , noise=torch.from_numpy(__A ).to(device=__A ) ) # verify the logits _A = torch.Size((1, 1_96, 7_68) ) self.assertEqual(outputs.logits.shape , __A ) _A = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__A ) , atol=1e-4 ) )
62
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] __A = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __A ( _lowercase ): '''simple docstring''' _A = torch.load(_lowercase , map_location='''cpu''' ) return sd def __A ( _lowercase , _lowercase , _lowercase=rename_keys_prefix ): '''simple docstring''' _A = OrderedDict() _A = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _A = key for name_pair in rename_keys_prefix: _A = new_key.replace(name_pair[0] , name_pair[1] ) _A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _A = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def __A ( _lowercase , _lowercase ): '''simple docstring''' assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: _A = '''pretraining''' if "vcr" in checkpoint_path: _A = {'''visual_embedding_dim''': 5_12} elif "vqa_advanced" in checkpoint_path: _A = {'''visual_embedding_dim''': 20_48} elif "vqa" in checkpoint_path: _A = {'''visual_embedding_dim''': 20_48} elif "nlvr" in checkpoint_path: _A = {'''visual_embedding_dim''': 10_24} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: _A = {'''visual_embedding_dim''': 5_12} _A = '''multichoice''' elif "vqa_advanced" in checkpoint_path: _A = {'''visual_embedding_dim''': 20_48} _A = '''vqa_advanced''' elif "vqa" in checkpoint_path: _A = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29} _A = '''vqa''' elif "nlvr" in checkpoint_path: _A = { '''visual_embedding_dim''': 10_24, '''num_labels''': 2, } _A = '''nlvr''' _A = VisualBertConfig(**_lowercase ) # Load State Dict _A = load_state_dict(_lowercase ) _A = get_new_dict(_lowercase , _lowercase ) if model_type == "pretraining": _A = VisualBertForPreTraining(_lowercase ) elif model_type == "vqa": _A = VisualBertForQuestionAnswering(_lowercase ) elif model_type == "nlvr": _A = VisualBertForVisualReasoning(_lowercase ) elif model_type == "multichoice": _A = VisualBertForMultipleChoice(_lowercase ) model.load_state_dict(_lowercase ) # Save Checkpoints Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') __A = parser.parse_args() 
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
62
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
62
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) A_ = "CIDAS/clipseg-rd64-refined" A_ = "image_segmenter" A_ = CLIPSegForImageSegmentation A_ = ["image", "text"] A_ = ["image"] def __init__( self: Optional[int] , *__A: Optional[int] , **__A: Tuple ) -> Dict: requires_backends(self , ['''vision'''] ) super().__init__(*__A , **__A ) def __A ( self: Any , __A: "Image" , __A: str ) -> List[Any]: return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors='''pt''' ) def __A ( self: int , __A: Optional[int] ) -> str: with torch.no_grad(): _A = self.model(**__A ).logits return logits def __A ( self: Optional[int] , __A: Dict ) -> Union[str, Any]: _A = outputs.cpu().detach().numpy() _A = 0 _A = 1 return Image.fromarray((array * 2_55).astype(np.uinta ) )
62
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export finished''' )
62
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "donut-swin" A_ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Tuple , __A: Union[str, Any]=2_24 , __A: Tuple=4 , __A: int=3 , __A: List[str]=96 , __A: Dict=[2, 2, 6, 2] , __A: Any=[3, 6, 12, 24] , __A: Any=7 , __A: int=4.0 , __A: Union[str, Any]=True , __A: Optional[int]=0.0 , __A: Optional[Any]=0.0 , __A: str=0.1 , __A: str="gelu" , __A: Optional[Any]=False , __A: Optional[int]=0.02 , __A: int=1e-5 , **__A: int , ) -> Tuple: super().__init__(**__A ) _A = image_size _A = patch_size _A = num_channels _A = embed_dim _A = depths _A = len(__A ) _A = num_heads _A = window_size _A = mlp_ratio _A = qkv_bias _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = drop_path_rate _A = hidden_act _A = use_absolute_embeddings _A = layer_norm_eps _A = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _A = int(embed_dim * 2 ** (len(__A ) - 1) )
62
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
1
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = UnCLIPImageVariationPipeline A_ = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"} A_ = IMAGE_VARIATION_BATCH_PARAMS A_ = [ "generator", "return_dict", "decoder_num_inference_steps", "super_res_num_inference_steps", ] A_ = False @property def __A ( self: int ) -> int: return 32 @property def __A ( self: Tuple ) -> List[str]: return 32 @property def __A ( self: Any ) -> Any: return self.time_input_dim @property def __A ( self: Dict ) -> Optional[Any]: return self.time_input_dim * 4 @property def __A ( self: List[str] ) -> Any: return 1_00 @property def __A ( self: Dict ) -> Optional[int]: _A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __A ( self: Union[str, Any] ) -> Optional[Any]: torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(__A ) @property def __A ( self: List[Any] ) -> Optional[int]: torch.manual_seed(0 ) _A = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(__A ) @property def __A ( self: List[Any] ) -> Tuple: torch.manual_seed(0 ) _A = { '''clip_embeddings_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''cross_attention_dim''': self.cross_attention_dim, } _A = UnCLIPTextProjModel(**__A ) return model @property def __A ( self: Tuple ) -> str: torch.manual_seed(0 ) _A = { '''sample_size''': 32, # RGB in channels '''in_channels''': 3, # Out channels is double in channels because predicts mean and variance '''out_channels''': 6, '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': '''identity''', } _A = UNetaDConditionModel(**__A ) return model @property def __A ( self: Union[str, Any] ) -> str: return { "sample_size": 64, "layers_per_block": 1, "down_block_types": 
("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def __A ( self: Union[str, Any] ) -> Optional[int]: torch.manual_seed(0 ) _A = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def __A ( self: int ) -> List[Any]: # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1 ) _A = UNetaDModel(**self.dummy_super_res_kwargs ) return model def __A ( self: Tuple ) -> int: _A = self.dummy_decoder _A = self.dummy_text_proj _A = self.dummy_text_encoder _A = self.dummy_tokenizer _A = self.dummy_super_res_first _A = self.dummy_super_res_last _A = UnCLIPScheduler( variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , ) _A = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , ) _A = CLIPImageProcessor(crop_size=32 , size=32 ) _A = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def __A ( self: List[Any] , __A: List[Any] , __A: Any=0 , __A: List[str]=True ) -> Optional[Any]: _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) if str(__A ).startswith('''mps''' ): _A = torch.manual_seed(__A ) else: _A = torch.Generator(device=__A ).manual_seed(__A ) if pil_image: _A = input_image * 0.5 + 0.5 _A = input_image.clamp(0 , 1 ) _A = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() _A = DiffusionPipeline.numpy_to_pil(__A )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def __A ( self: Optional[Any] ) -> Any: _A = '''cpu''' _A = self.get_dummy_components() _A = self.pipeline_class(**__A ) _A = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs(__A , pil_image=__A ) _A = pipe(**__A ) _A = output.images _A = self.get_dummy_inputs(__A , pil_image=__A ) _A = pipe( **__A , return_dict=__A , )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A = np.array( [ 0.9_997, 0.0_002, 0.9_997, 0.9_997, 0.9_969, 0.0_023, 0.9_997, 0.9_969, 0.9_970, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self: Any ) -> Union[str, Any]: _A = '''cpu''' _A = self.get_dummy_components() _A = self.pipeline_class(**__A ) _A = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs(__A , pil_image=__A ) _A = pipe(**__A ) _A = output.images _A = self.get_dummy_inputs(__A , pil_image=__A ) _A = pipe( **__A , return_dict=__A , )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self: Union[str, Any] ) -> List[Any]: _A 
= '''cpu''' _A = self.get_dummy_components() _A = self.pipeline_class(**__A ) _A = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _A = self.get_dummy_inputs(__A , pil_image=__A ) _A = [ pipeline_inputs['''image'''], pipeline_inputs['''image'''], ] _A = pipe(**__A ) _A = output.images _A = self.get_dummy_inputs(__A , pil_image=__A ) _A = [ tuple_pipeline_inputs['''image'''], tuple_pipeline_inputs['''image'''], ] _A = pipe( **__A , return_dict=__A , )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) _A = np.array( [ 0.9_997, 0.9_989, 0.0_008, 0.0_021, 0.9_960, 0.0_018, 0.0_014, 0.0_002, 0.9_933, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self: int ) -> List[Any]: _A = torch.device('''cpu''' ) class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = 1 _A = self.get_dummy_components() _A = self.pipeline_class(**__A ) _A = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _A = torch.Generator(device=__A ).manual_seed(0 ) _A = pipe.decoder.dtype _A = 1 _A = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) _A = pipe.prepare_latents( __A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() ) _A = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) _A = pipe.prepare_latents( __A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() ) _A = self.get_dummy_inputs(__A , pil_image=__A ) _A = pipe( **__A , decoder_latents=__A , super_res_latents=__A ).images _A = self.get_dummy_inputs(__A , pil_image=__A ) # Don't pass image, instead pass embedding _A = pipeline_inputs.pop('''image''' ) _A = pipe.image_encoder(__A ).image_embeds _A = pipe( **__A , decoder_latents=__A , super_res_latents=__A , image_embeddings=__A , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1e-4 @skip_mps def __A ( self: Optional[Any] ) -> Tuple: _A = torch_device == '''cpu''' # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor _A = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=__A , expected_max_diff=__A ) @skip_mps def __A ( self: str ) -> str: _A = torch_device == '''cpu''' _A = True _A = [ '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] self._test_inference_batch_single_identical( test_max_difference=__A , relax_max_difference=__A , additional_params_copy_to_batched_inputs=__A , ) def __A ( self: Dict ) -> Any: _A = [ '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes _A = [2, 3] self._test_inference_batch_consistent( batch_sizes=__A , additional_params_copy_to_batched_inputs=__A , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=__A ) @skip_mps def __A ( self: List[Any] ) -> List[Any]: return super().test_dict_tuple_outputs_equivalent() @skip_mps def __A ( self: List[str] ) -> Optional[Any]: return super().test_save_load_local() @skip_mps def __A ( self: Tuple ) -> List[str]: return super().test_save_load_optional_components() @slow @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Dict ) -> 
Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self: List[str] ) -> List[Any]: _A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' ) _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' ) _A = UnCLIPImageVariationPipeline.from_pretrained( '''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa ) _A = pipeline.to(__A ) pipeline.set_progress_bar_config(disable=__A ) _A = torch.Generator(device='''cpu''' ).manual_seed(0 ) _A = pipeline( __A , generator=__A , output_type='''np''' , ) _A = output.images[0] assert image.shape == (2_56, 2_56, 3) assert_mean_pixel_difference(__A , __A , 15 )
62
import itertools import string from collections.abc import Generator, Iterable def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = iter(_lowercase ) while True: _A = tuple(itertools.islice(_lowercase , _lowercase ) ) if not chunk: return yield chunk def __A ( _lowercase ): '''simple docstring''' _A = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] ) _A = '''''' if len(_lowercase ) < 2: return dirty for i in range(len(_lowercase ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(_lowercase ) & 1: clean += "X" return clean def __A ( _lowercase ): '''simple docstring''' _A = '''ABCDEFGHIKLMNOPQRSTUVWXYZ''' # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler _A = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(_lowercase ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(_lowercase ) return table def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = generate_table(_lowercase ) _A = prepare_input(_lowercase ) _A = '''''' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_lowercase , 2 ): _A ,_A = divmod(table.index(_lowercase ) , 5 ) _A ,_A = divmod(table.index(_lowercase ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = generate_table(_lowercase ) _A = '''''' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_lowercase , 2 ): _A ,_A = divmod(table.index(_lowercase ) , 5 ) _A ,_A = divmod(table.index(_lowercase ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
62
1
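The Playfair sample in the row above encrypts text two letters at a time against a 5x5 key table, so its preparation step has to split the input into digraphs, break up doubled letters with an 'X', and pad odd-length input. Because every helper in the row is machine-mangled to `__A`, the following is a small illustrative restatement of just that digraph step; the name `prepare_digraphs` and the sample string are mine, not part of the row.

import string

def prepare_digraphs(text: str) -> list:
    """Split text into two-letter groups, inserting 'X' between doubled letters
    and padding the end, similar to the preparation step in the row above."""
    letters = [c for c in text.upper() if c in string.ascii_uppercase]
    cleaned = []
    for i, ch in enumerate(letters):
        cleaned.append(ch)
        if i + 1 < len(letters) and ch == letters[i + 1]:
            cleaned.append("X")  # break up doubled letters so no pair repeats a character
    if len(cleaned) % 2:
        cleaned.append("X")  # pad to an even number of letters
    return ["".join(cleaned[j:j + 2]) for j in range(0, len(cleaned), 2)]

print(prepare_digraphs("balloon"))  # ['BA', 'LX', 'LO', 'XO', 'NX']

The table construction and the row/column substitution rules themselves stay as written in the row.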
def __A ( _lowercase , _lowercase ):
    '''simple docstring'''
    return 1 if input_a == input_a else 0


def __A ( ):
    '''simple docstring'''
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
62
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_token_type_ids _A = use_input_mask _A = use_labels _A = use_mc_token_ids _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = self.vocab_size - 1 def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None if self.use_mc_token_ids: _A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self: Optional[int] ) -> List[Any]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]: _A = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str: _A = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() _A = model(__A , token_type_ids=__A , labels=__A ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Optional[int] ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any: _A = self.num_labels _A = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __A ( self: Any ) -> Union[str, Any]: _A = CTRLModelTester(self ) _A = ConfigTester(self , config_class=__A , n_embd=37 ) def __A ( self: Optional[int] ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self: Dict ) -> Any: self.config_tester.run_common_tests() def __A ( self: str ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[Any] ) -> int: pass @slow def __A ( self: Tuple ) -> Dict: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __A ( self: Any ) -> Union[str, Any]: pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self: Any ) -> Any: _A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(__A ) _A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is _A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a _A = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
62
1
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar __A = TypeVar('T') class SCREAMING_SNAKE_CASE ( Generic[T] ): """simple docstring""" A_ = 42 # Cache store of keys A_ = 42 # References of the keys in cache A_ = 10 # Maximum capacity of cache def __init__( self: Optional[int] , __A: int ) -> None: _A = deque() _A = set() if not n: _A = sys.maxsize elif n < 0: raise ValueError('''n should be an integer greater than 0.''' ) else: _A = n def __A ( self: Union[str, Any] , __A: T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: _A = self.dq_store.pop() self.key_reference.remove(__A ) else: self.dq_store.remove(__A ) self.dq_store.appendleft(__A ) self.key_reference.add(__A ) def __A ( self: Any ) -> None: for k in self.dq_store: print(__A ) def __repr__( self: Any ) -> str: return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() __A = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
62
__A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_lowercase , _lowercase , _lowercase ) order.append(_lowercase ) return order def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_lowercase , _lowercase , _lowercase ) return component def __A ( _lowercase ): '''simple docstring''' _A = len(_lowercase ) * [False] _A = {vert: [] for vert in range(len(_lowercase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_lowercase ) _A = [] for i, was_visited in enumerate(_lowercase ): if not was_visited: order += topology_sort(_lowercase , _lowercase , _lowercase ) _A = [] _A = len(_lowercase ) * [False] for i in range(len(_lowercase ) ): _A = order[len(_lowercase ) - i - 1] if not visited[vert]: _A = find_components(_lowercase , _lowercase , _lowercase ) components_list.append(_lowercase ) return components_list
62
1
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = ["image_processor", "tokenizer"] A_ = "BlipImageProcessor" A_ = "AutoTokenizer" def __init__( self: List[Any] , __A: Any , __A: List[Any] , __A: Tuple ) -> Union[str, Any]: super().__init__(__A , __A ) # add QFormer tokenizer _A = qformer_tokenizer def __call__( self: List[Any] , __A: ImageInput = None , __A: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A: bool = True , __A: Union[bool, str, PaddingStrategy] = False , __A: Union[bool, str, TruncationStrategy] = None , __A: Optional[int] = None , __A: int = 0 , __A: Optional[int] = None , __A: Optional[bool] = None , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = True , __A: Optional[Union[str, TensorType]] = None , **__A: Optional[Any] , ) -> BatchFeature: if images is None and text is None: raise ValueError('''You have to specify at least images or text.''' ) _A = BatchFeature() if text is not None: _A = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) encoding.update(__A ) _A = self.qformer_tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) _A = qformer_text_encoding.pop('''input_ids''' ) _A = qformer_text_encoding.pop('''attention_mask''' ) if images is not None: _A = self.image_processor(__A , return_tensors=__A ) encoding.update(__A ) return encoding def __A ( self: Any , *__A: Any , **__A: Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*__A , **__A ) def __A ( self: Dict , *__A: Tuple , **__A: Any ) -> List[Any]: return self.tokenizer.decode(*__A , **__A ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __A ( self: Union[str, Any] ) -> str: _A = self.tokenizer.model_input_names _A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def __A ( self: Any , __A: Optional[Any] , **__A: int ) -> Dict: if os.path.isfile(__A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , '''qformer_tokenizer''' ) self.qformer_tokenizer.save_pretrained(__A ) return super().save_pretrained(__A , **__A ) @classmethod def __A ( cls: Optional[int] , __A: Optional[int] , **__A: List[Any] ) -> List[str]: _A = AutoTokenizer.from_pretrained(__A , subfolder='''qformer_tokenizer''' ) _A = cls._get_arguments_from_pretrained(__A , **__A ) args.append(__A ) return cls(*__A )
62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: _A = mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) else: _A = max( mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) , mf_knapsack(i - 1 , _lowercase , _lowercase , j - wt[i - 1] ) + val[i - 1] , ) _A = val return f[i][j] def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: _A = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: _A = dp[i - 1][w_] return dp[n][w_], dp def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if not (isinstance(_lowercase , (list, tuple) ) and isinstance(_lowercase , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) _A = len(_lowercase ) if num_items != len(_lowercase ): _A = ( '''The number of weights must be the same as the number of values.\n''' f"""But got {num_items} weights and {len(_lowercase )} values""" ) raise ValueError(_lowercase ) for i in range(_lowercase ): if not isinstance(wt[i] , _lowercase ): _A = ( '''All weights must be integers but got weight of ''' f"""type {type(wt[i] )} at index {i}""" ) raise TypeError(_lowercase ) _A ,_A = knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) _A = set() _construct_solution(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) return optimal_val, example_optional_set def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(_lowercase , _lowercase , i - 1 , _lowercase , _lowercase ) else: optimal_set.add(_lowercase ) _construct_solution(_lowercase , _lowercase , i - 1 , j - wt[i - 1] , _lowercase ) if __name__ == "__main__": __A = [3, 2, 4, 4] __A = [4, 3, 2, 3] __A = 4 __A = 6 __A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __A , __A = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __A , __A = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
62
1
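The knapsack sample in the row above fills an (n+1) x (w+1) table in which entry [i][w_] is the best value achievable with the first i items and capacity w_, then walks the table backwards to recover an optimal item subset. A compact sketch of the same recurrence, written standalone because the row's identifiers are mangled; the function name is hypothetical, and the example reads the row's first list as values and its second as weights, which matches its asserted optimum of 8.

def knapsack_max_value(weights, values, capacity):
    """0/1 knapsack DP: dp[i][c] is the best value using the first i items
    within capacity c (same recurrence as the row above)."""
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for c in range(capacity + 1):
            dp[i][c] = dp[i - 1][c]  # option 1: skip item i-1
            if weights[i - 1] <= c:  # option 2: take item i-1 if it fits
                dp[i][c] = max(dp[i][c], dp[i - 1][c - weights[i - 1]] + values[i - 1])
    return dp[n][capacity]

# Reading the row's example as values [3, 2, 4, 4], weights [4, 3, 2, 3], capacity 6:
print(knapsack_max_value([4, 3, 2, 3], [3, 2, 4, 4], 6))  # 8, the row's asserted optimum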
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def __A ( _lowercase ): '''simple docstring''' if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(_lowercase , '''_dynamo''' ): return False return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule ) def __A ( _lowercase , _lowercase = True ): '''simple docstring''' _A = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) _A = is_compiled_module(_lowercase ) if is_compiled: _A = model _A = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_lowercase , _lowercase ): _A = model.module if not keep_fpaa_wrapper: _A = getattr(_lowercase , '''forward''' ) _A = model.__dict__.pop('''_original_forward''' , _lowercase ) if original_forward is not None: while hasattr(_lowercase , '''__wrapped__''' ): _A = forward.__wrapped__ if forward == original_forward: break _A = forward if getattr(_lowercase , '''_converted_to_transformer_engine''' , _lowercase ): convert_model(_lowercase , to_transformer_engine=_lowercase ) if is_compiled: _A = model _A = compiled_model return model def __A ( ): '''simple docstring''' PartialState().wait_for_everyone() def __A ( _lowercase , _lowercase ): '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(_lowercase , _lowercase ) elif PartialState().local_process_index == 0: torch.save(_lowercase , _lowercase ) @contextmanager def __A ( **_lowercase ): '''simple docstring''' for key, value in kwargs.items(): _A = str(_lowercase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def __A ( _lowercase ): '''simple docstring''' if not hasattr(_lowercase , '''__qualname__''' ) and not hasattr(_lowercase , '''__name__''' ): _A = getattr(_lowercase , '''__class__''' , _lowercase ) if hasattr(_lowercase , '''__qualname__''' ): return obj.__qualname__ if hasattr(_lowercase , '''__name__''' ): return obj.__name__ return str(_lowercase ) def __A ( _lowercase , _lowercase ): '''simple docstring''' for key, value in source.items(): if isinstance(_lowercase , _lowercase ): _A = destination.setdefault(_lowercase , {} ) merge_dicts(_lowercase , _lowercase ) else: _A = value return destination def __A ( _lowercase = None ): '''simple docstring''' if port is None: _A = 2_95_00 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('''localhost''', port) ) == 0
62
def __A ( _lowercase = 1_00_00_00 ):
    '''simple docstring'''
    _A = 1
    _A = 1
    _A = {1: 1}
    for inputa in range(2 , _lowercase ):
        _A = 0
        _A = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                _A = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            _A = counter
        if counter > pre_counter:
            _A = inputa
            _A = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
62
1
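The Project Euler sample in the row above searches for the longest Collatz chain below one million and caches the chain length of each starting value it has already resolved, so later chains stop as soon as they reach a previously seen number. Below is a minimal illustrative variant of that memoisation; the names are hypothetical, and unlike the row it also caches intermediate values.

_chain_lengths = {1: 1}  # known Collatz chain lengths, seeded with the terminal value

def collatz_chain_length(start):
    """Chain length from `start`, reusing previously computed lengths."""
    path = []
    n = start
    while n not in _chain_lengths:
        path.append(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    length = _chain_lengths[n]
    for m in reversed(path):  # unwind so every visited number gets cached
        length += 1
        _chain_lengths[m] = length
    return length

print(collatz_chain_length(13))  # 10: 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1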
def __A ( ):
    '''simple docstring'''
    return 1


def __A ( _lowercase ):
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()


def __A ( _lowercase ):
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(_lowercase )


def __A ( _lowercase ):
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(_lowercase )


def __A ( _lowercase ):
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(_lowercase )


def __A ( _lowercase ):
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(_lowercase )


def __A ( _lowercase ):
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 1_00 ) + fifty_pence(_lowercase )


def __A ( _lowercase ):
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 2_00 ) + one_pound(_lowercase )


def __A ( _lowercase = 2_00 ):
    '''simple docstring'''
    return two_pound(_lowercase )


if __name__ == "__main__":
    print(solution(int(input().strip())))
62
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = word.split() def justify(_lowercase , _lowercase , _lowercase ) -> str: _A = max_width - width _A = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _A = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _A = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _A = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _A = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _A = [] _A = [] _A = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width _A ,_A = [word], len(_lowercase ) _A = max_width - width - len(_lowercase ) answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
62
1
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Tuple ) -> Union[str, Any]: _A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() _A = dict(zip(__A , range(len(__A ) ) ) ) _A = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } _A = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_60_00, '''return_attention_mask''': False, '''do_normalize''': True, } _A = tempfile.mkdtemp() _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , __A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) # load decoder from hub _A = '''hf-internal-testing/ngram-beam-search-decoder''' def __A ( self: Any , **__A: str ) -> Union[str, Any]: _A = self.add_kwargs_tokens_map.copy() kwargs.update(__A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: List[str] , **__A: Optional[Any] ) -> Dict: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A ) def __A ( self: Optional[Any] , **__A: str ) -> Optional[Any]: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A ) def __A ( self: Tuple ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self: Dict ) -> Optional[int]: _A = self.get_tokenizer() _A = self.get_feature_extractor() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) processor.save_pretrained(self.tmpdirname ) _A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __A ) def __A ( self: Any ) -> List[str]: _A = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() 
, feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match _A = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def __A ( self: str ) -> int: _A = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(__A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def __A ( self: int ) -> Dict: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = floats_list((3, 10_00) ) _A = feature_extractor(__A , return_tensors='''np''' ) _A = processor(__A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self: Optional[int] ) -> Optional[int]: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = '''This is a test string''' _A = processor(text=__A ) _A = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self: List[Any] , __A: int=(2, 10, 16) , __A: List[Any]=77 ) -> Dict: np.random.seed(__A ) return np.random.rand(*__A ) def __A ( self: List[Any] ) -> int: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits(shape=(10, 16) , seed=13 ) _A = processor.decode(__A ) _A = decoder.decode_beams(__A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def __A ( self: List[Any] , __A: Any ) -> Any: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: _A = processor.batch_decode(__A ) else: with get_context(__A ).Pool() as pool: _A = processor.batch_decode(__A , __A ) _A = list(__A ) with get_context('''fork''' ).Pool() as p: _A = decoder.decode_beams_batch(__A , __A ) _A ,_A ,_A = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(__A , decoded_processor.logit_score ) self.assertListEqual(__A , decoded_processor.lm_score ) def __A ( self: Tuple ) -> Union[str, Any]: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits() _A = 15 _A = -20.0 _A = -4.0 _A = processor.batch_decode( __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , ) _A = decoded_processor_out.text _A = list(__A ) with get_context('''fork''' ).Pool() as pool: _A = decoder.decode_beams_batch( __A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , ) _A = [d[0][0] for d in decoded_decoder_out] _A = [d[0][2] for d in decoded_decoder_out] _A = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__A , __A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A ) self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) ) self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) ) def __A ( self: Dict ) -> Any: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits() _A = 2.0 _A = 5.0 _A = -20.0 _A = True _A = processor.batch_decode( __A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , ) _A = decoded_processor_out.text _A = list(__A ) decoder.reset_params( alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , ) with get_context('''fork''' ).Pool() as pool: _A = decoder.decode_beams_batch( __A , __A , ) _A = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__A , __A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A ) _A = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , __A ) def __A ( self: Union[str, Any] ) -> int: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = processor.decoder.model_container[processor.decoder._model_key] _A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() _A = os.listdir(__A ) _A = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(__A , __A ) def __A ( self: Optional[int] ) -> Dict: _A = snapshot_download('''hf-internal-testing/processor_with_lm''' ) _A = WavaVecaProcessorWithLM.from_pretrained(__A ) _A = processor.decoder.model_container[processor.decoder._model_key] _A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() _A = os.listdir(__A ) _A = os.listdir(__A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__A , __A ) def __A ( self: Tuple ) -> Optional[Any]: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = floats_list((3, 10_00) ) _A = processor_wavaveca(__A , return_tensors='''np''' ) _A = processor_auto(__A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) _A = self._get_dummy_logits() _A = processor_wavaveca.batch_decode(__A ) _A = processor_auto.batch_decode(__A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def __A ( self: Optional[Any] ) -> int: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def __A ( __A: Any , __A: Tuple ) -> int: _A = [d[key] for d in offsets] return retrieved_list def __A ( self: Dict ) -> List[str]: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = self._get_dummy_logits()[0] _A = processor.decode(__A , output_word_offsets=__A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__A , __A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def __A ( self: List[str] ) -> List[str]: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = self._get_dummy_logits() _A = processor.batch_decode(__A , output_word_offsets=__A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__A , __A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch 
@require_torchaudio def __A ( self: List[str] ) -> Optional[int]: import torch _A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A ) _A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) ) _A = iter(__A ) _A = next(__A ) _A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) _A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train _A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): _A = model(__A ).logits.cpu().numpy() _A = processor.decode(logits[0] , output_word_offsets=__A ) _A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate _A = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] _A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A ) self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text ) # output times _A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) ) _A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) ) # fmt: off _A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) _A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) ) self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
62
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with 
pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
62
1
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def __A ( ): '''simple docstring''' _A = ArgumentParser( description=( '''PyTorch TPU distributed training launch ''' '''helper utility that will spawn up ''' '''multiple distributed processes''' ) ) # Optional arguments for the launch helper parser.add_argument('''--num_cores''' , type=_lowercase , default=1 , help='''Number of TPU cores to use (1 or 8).''' ) # positional parser.add_argument( '''training_script''' , type=_lowercase , help=( '''The full path to the single TPU training ''' '''program/script to be launched in parallel, ''' '''followed by all the arguments for the ''' '''training script''' ) , ) # rest from the training program parser.add_argument('''training_script_args''' , nargs=_lowercase ) return parser.parse_args() def __A ( ): '''simple docstring''' _A = parse_args() # Import training_script as a module. _A = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) _A = script_fpath.stem _A = importlib.import_module(_lowercase ) # Patch sys.argv _A = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
62
import math


def __A ( _lowercase ):
    '''simple docstring'''
    _A = []
    _A = 2
    _A = int(math.sqrt(_lowercase ) )  # Size of every segment
    _A = [True] * (end + 1)
    _A = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(_lowercase )
            for i in range(start * start , end + 1 , _lowercase ):
                _A = False
        start += 1
    prime += in_prime
    _A = end + 1
    _A = min(2 * end , _lowercase )
    while low <= n:
        _A = [True] * (high - low + 1)
        for each in in_prime:
            _A = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(_lowercase , high + 1 , _lowercase ):
                _A = False
        for j in range(len(_lowercase ) ):
            if temp[j] is True:
                prime.append(j + low )
        _A = high + 1
        _A = min(high + end , _lowercase )
    return prime


print(sieve(10**6))
62
1
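The segmented sieve in the row above first runs an ordinary sieve over [2, sqrt(n)], then reuses those seed primes to cross off multiples one window of roughly sqrt(n) numbers at a time, which keeps memory bounded. A minimal sketch of just the seeding step, with a hypothetical helper name; the segmentation loop itself is in the row.

import math

def simple_sieve(limit):
    """Plain sieve of Eratosthenes, the seeding step of a segmented sieve."""
    is_prime = [True] * (limit + 1)
    primes = []
    for p in range(2, limit + 1):
        if is_prime[p]:
            primes.append(p)
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    return primes

# For n = 100 the row would seed with the primes up to sqrt(100) = 10:
print(simple_sieve(int(math.sqrt(100))))  # [2, 3, 5, 7]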
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class SCREAMING_SNAKE_CASE ( unittest.TestCase , snake_case ):
    """simple docstring"""

    def __A ( self: Tuple ) -> Union[str, Any]:
        _A = load_tool('''text-classification''' )
        self.tool.setup()
        _A = load_tool('''text-classification''' , remote=__A )

    def __A ( self: str ) -> Any:
        _A = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
        self.assertEqual(__A , '''positive''' )

    def __A ( self: Optional[int] ) -> List[str]:
        _A = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
        self.assertEqual(__A , '''positive''' )

    def __A ( self: Union[str, Any] ) -> Optional[int]:
        _A = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
        self.assertEqual(__A , '''positive''' )

    def __A ( self: int ) -> List[Any]:
        _A = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
        self.assertEqual(__A , '''positive''' )
62
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
62
1
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
62
def topological_sort(graph):
    """Kahn's algorithm: repeatedly emit vertices whose indegree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # count the incoming edges of every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # start from the vertices that have no incoming edge
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        # removing `vertex` may free up some of its neighbours
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
62
1
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = PriorTransformer A_ = "hidden_states" @property def __A ( self: List[str] ) -> Dict: _A = 4 _A = 8 _A = 7 _A = floats_tensor((batch_size, embedding_dim) ).to(__A ) _A = floats_tensor((batch_size, embedding_dim) ).to(__A ) _A = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__A ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __A ( self: Union[str, Any] , __A: List[Any]=0 ) -> str: torch.manual_seed(__A ) _A = 4 _A = 8 _A = 7 _A = torch.randn((batch_size, embedding_dim) ).to(__A ) _A = torch.randn((batch_size, embedding_dim) ).to(__A ) _A = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__A ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def __A ( self: Dict ) -> int: return (4, 8) @property def __A ( self: Any ) -> Dict: return (4, 8) def __A ( self: List[Any] ) -> List[str]: _A = { '''num_attention_heads''': 2, '''attention_head_dim''': 4, '''num_layers''': 2, '''embedding_dim''': 8, '''num_embeddings''': 7, '''additional_embeddings''': 4, } _A = self.dummy_input return init_dict, inputs_dict def __A ( self: Optional[Any] ) -> str: _A ,_A = PriorTransformer.from_pretrained( '''hf-internal-testing/prior-dummy''' , output_loading_info=__A ) self.assertIsNotNone(__A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(__A ) _A = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def __A ( self: Tuple ) -> List[Any]: _A ,_A = self.prepare_init_args_and_inputs_for_common() _A = self.model_class(**__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''hidden_states''', '''timestep'''] self.assertListEqual(arg_names[:2] , __A ) def __A ( self: Optional[Any] ) -> int: _A = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' ) _A = model.to(__A ) if hasattr(__A , '''set_default_attn_processor''' ): model.set_default_attn_processor() _A = self.get_dummy_seed_input() with torch.no_grad(): _A = model(**__A )[0] _A = output[0, :5].flatten().cpu() print(__A ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
_A = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(__A , __A , rtol=1e-2 ) ) @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int , __A: Optional[Any]=1 , __A: List[Any]=7_68 , __A: List[str]=77 , __A: List[Any]=0 ) -> Any: torch.manual_seed(__A ) _A = batch_size _A = embedding_dim _A = num_embeddings _A = torch.randn((batch_size, embedding_dim) ).to(__A ) _A = torch.randn((batch_size, embedding_dim) ).to(__A ) _A = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__A ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __A ( self: Optional[Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def __A ( self: Any , __A: Tuple , __A: int ) -> Dict: _A = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' ) model.to(__A ) _A = self.get_dummy_seed_input(seed=__A ) with torch.no_grad(): _A = model(**__A )[0] assert list(sample.shape ) == [1, 7_68] _A = sample[0, :8].flatten().cpu() print(__A ) _A = torch.tensor(__A ) assert torch_all_close(__A , __A , atol=1e-3 )
62
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients, falling back to lower orders while
        # fewer than four previous evaluations are available
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
62
1
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __A = 0 __A = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __A = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __A = tuple[int, int] class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Union[str, Any] , __A: int , __A: int , __A: int , __A: int , __A: int , __A: Node | None , ) -> None: _A = pos_x _A = pos_y _A = (pos_y, pos_x) _A = goal_x _A = goal_y _A = g_cost _A = parent _A = self.calculate_heuristic() _A = self.g_cost + self.h_cost def __A ( self: int ) -> float: _A = self.pos_x - self.goal_x _A = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__A ) + abs(__A ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self: List[str] , __A: Node ) -> bool: return self.f_cost < other.f_cost class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: TPosition , __A: TPosition ) -> str: _A = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __A ) _A = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __A ) _A = [self.start] _A = [] _A = False def __A ( self: Optional[Any] ) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() _A = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(__A ) self.closed_nodes.append(__A ) _A = self.get_successors(__A ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__A ) else: # retrieve the best current path _A = self.open_nodes.pop(self.open_nodes.index(__A ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__A ) else: self.open_nodes.append(__A ) return [self.start.pos] def __A ( self: str , __A: Node ) -> list[Node]: _A = [] for action in delta: _A = parent.pos_x + action[1] _A = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __A , __A , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __A , ) ) return successors def __A ( self: str , __A: Node | None ) -> list[TPosition]: _A = node _A = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) _A = current_node.parent path.reverse() return path class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Union[str, Any] , __A: TPosition , __A: TPosition ) -> None: _A = AStar(__A , __A ) _A = AStar(__A , __A ) _A = False def __A ( self: Tuple ) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() _A = self.fwd_astar.open_nodes.pop(0 ) _A = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __A , __A ) self.fwd_astar.closed_nodes.append(__A ) self.bwd_astar.closed_nodes.append(__A ) _A = current_bwd_node _A = current_fwd_node _A = { self.fwd_astar: self.fwd_astar.get_successors(__A ), self.bwd_astar: self.bwd_astar.get_successors(__A ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__A ) else: # 
retrieve the best current path _A = astar.open_nodes.pop( astar.open_nodes.index(__A ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__A ) else: astar.open_nodes.append(__A ) return [self.fwd_astar.start.pos] def __A ( self: List[str] , __A: Node , __A: Node ) -> list[TPosition]: _A = self.fwd_astar.retrace_path(__A ) _A = self.bwd_astar.retrace_path(__A ) bwd_path.pop() bwd_path.reverse() _A = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __A = (0, 0) __A = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __A = time.time() __A = AStar(init, goal) __A = a_star.search() __A = time.time() - start_time print(f'AStar execution time = {end_time:f} seconds') __A = time.time() __A = BidirectionalAStar(init, goal) __A = time.time() - bd_start_time print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
62
def depth_first_search(grid, row, col, visit):
    """Count the paths from the top-left to the bottom-right cell of `grid`,
    moving one step up, down, left or right, avoiding cells marked 1 and
    never visiting the same cell twice."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f'{solution() = }')
62
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
62
1